Diffstat (limited to 'drivers/nvme')
46 files changed, 10867 insertions, 2298 deletions
diff --git a/drivers/nvme/common/Kconfig b/drivers/nvme/common/Kconfig index 244432e0b73d..da963e4f3f1f 100644 --- a/drivers/nvme/common/Kconfig +++ b/drivers/nvme/common/Kconfig @@ -12,3 +12,4 @@ config NVME_AUTH select CRYPTO_SHA512 select CRYPTO_DH select CRYPTO_DH_RFC7919_GROUPS + select CRYPTO_HKDF diff --git a/drivers/nvme/common/auth.c b/drivers/nvme/common/auth.c index a3455f1d67fa..e07e7d4bf8b6 100644 --- a/drivers/nvme/common/auth.c +++ b/drivers/nvme/common/auth.c @@ -8,12 +8,15 @@ #include <linux/base64.h> #include <linux/prandom.h> #include <linux/scatterlist.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/hash.h> #include <crypto/dh.h> +#include <crypto/hkdf.h> #include <linux/nvme.h> #include <linux/nvme-auth.h> +#define HKDF_MAX_HASHLEN 64 + static u32 nvme_dhchap_seqnum; static DEFINE_MUTEX(nvme_dhchap_mutex); @@ -175,7 +178,7 @@ struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret, if (!key) return ERR_PTR(-ENOMEM); - key_len = base64_decode(secret, allocated_len, key->key); + key_len = base64_decode(secret, allocated_len, key->key, true, BASE64_STD); if (key_len < 0) { pr_debug("base64 key decoding error %d\n", key_len); @@ -239,7 +242,7 @@ struct nvme_dhchap_key *nvme_auth_transform_key( { const char *hmac_name; struct crypto_shash *key_tfm; - struct shash_desc *shash; + SHASH_DESC_ON_STACK(shash, key_tfm); struct nvme_dhchap_key *transformed_key; int ret, key_len; @@ -264,19 +267,11 @@ struct nvme_dhchap_key *nvme_auth_transform_key( if (IS_ERR(key_tfm)) return ERR_CAST(key_tfm); - shash = kmalloc(sizeof(struct shash_desc) + - crypto_shash_descsize(key_tfm), - GFP_KERNEL); - if (!shash) { - ret = -ENOMEM; - goto out_free_key; - } - key_len = crypto_shash_digestsize(key_tfm); transformed_key = nvme_auth_alloc_key(key_len, key->hash); if (!transformed_key) { ret = -ENOMEM; - goto out_free_shash; + goto out_free_key; } shash->tfm = key_tfm; @@ -296,15 +291,12 @@ struct nvme_dhchap_key *nvme_auth_transform_key( if (ret < 0) goto out_free_transformed_key; - kfree(shash); crypto_free_shash(key_tfm); return transformed_key; out_free_transformed_key: nvme_auth_free_key(transformed_key); -out_free_shash: - kfree(shash); out_free_key: crypto_free_shash(key_tfm); @@ -471,5 +463,385 @@ int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key) } EXPORT_SYMBOL_GPL(nvme_auth_generate_key); +/** + * nvme_auth_generate_psk - Generate a PSK for TLS + * @hmac_id: Hash function identifier + * @skey: Session key + * @skey_len: Length of @skey + * @c1: Value of challenge C1 + * @c2: Value of challenge C2 + * @hash_len: Hash length of the hash algorithm + * @ret_psk: Pointer to the resulting generated PSK + * @ret_len: length of @ret_psk + * + * Generate a PSK for TLS as specified in NVMe base specification, section + * 8.13.5.9: Generated PSK for TLS + * + * The generated PSK for TLS shall be computed applying the HMAC function + * using the hash function H( ) selected by the HashID parameter in the + * DH-HMAC-CHAP_Challenge message with the session key KS as key to the + * concatenation of the two challenges C1 and C2 (i.e., generated + * PSK = HMAC(KS, C1 || C2)). + * + * Returns 0 on success with a valid generated PSK pointer in @ret_psk and + * the length of @ret_psk in @ret_len, or a negative error number otherwise. 
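As a usage sketch of the relationship spelled out above (generated PSK = HMAC(KS, C1 || C2)), a caller that has completed DH-HMAC-CHAP could invoke the new helper along these lines; the wrapper function and its parameter names are illustrative only, everything else follows the signature added by this patch:

/*
 * Illustrative sketch: derive the generated PSK once DH-HMAC-CHAP has
 * finished, i.e. once the session key KS and both challenges are known.
 * The HMAC input order is C1 followed by C2.
 */
static int example_generated_psk(u8 hash_id, u8 *ks, size_t ks_len,
                                 u8 *c1, u8 *c2, size_t hash_len)
{
        u8 *psk;
        size_t psk_len;
        int ret;

        ret = nvme_auth_generate_psk(hash_id, ks, ks_len, c1, c2,
                                     hash_len, &psk, &psk_len);
        if (ret)
                return ret;

        /* psk_len equals the digest size of the negotiated hash. */
        kfree_sensitive(psk);
        return 0;
}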
+ */ +int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len, + u8 *c1, u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len) +{ + struct crypto_shash *tfm; + SHASH_DESC_ON_STACK(shash, tfm); + u8 *psk; + const char *hmac_name; + int ret, psk_len; + + if (!c1 || !c2) + return -EINVAL; + + hmac_name = nvme_auth_hmac_name(hmac_id); + if (!hmac_name) { + pr_warn("%s: invalid hash algorithm %d\n", + __func__, hmac_id); + return -EINVAL; + } + + tfm = crypto_alloc_shash(hmac_name, 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + psk_len = crypto_shash_digestsize(tfm); + psk = kzalloc(psk_len, GFP_KERNEL); + if (!psk) { + ret = -ENOMEM; + goto out_free_tfm; + } + + shash->tfm = tfm; + ret = crypto_shash_setkey(tfm, skey, skey_len); + if (ret) + goto out_free_psk; + + ret = crypto_shash_init(shash); + if (ret) + goto out_free_psk; + + ret = crypto_shash_update(shash, c1, hash_len); + if (ret) + goto out_free_psk; + + ret = crypto_shash_update(shash, c2, hash_len); + if (ret) + goto out_free_psk; + + ret = crypto_shash_final(shash, psk); + if (!ret) { + *ret_psk = psk; + *ret_len = psk_len; + } + +out_free_psk: + if (ret) + kfree_sensitive(psk); +out_free_tfm: + crypto_free_shash(tfm); + + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_generate_psk); + +/** + * nvme_auth_generate_digest - Generate TLS PSK digest + * @hmac_id: Hash function identifier + * @psk: Generated input PSK + * @psk_len: Length of @psk + * @subsysnqn: NQN of the subsystem + * @hostnqn: NQN of the host + * @ret_digest: Pointer to the returned digest + * + * Generate a TLS PSK digest as specified in TP8018 Section 3.6.1.3: + * TLS PSK and PSK identity Derivation + * + * The PSK digest shall be computed by encoding in Base64 (refer to RFC + * 4648) the result of the application of the HMAC function using the hash + * function specified in item 4 above (ie the hash function of the cipher + * suite associated with the PSK identity) with the PSK as HMAC key to the + * concatenation of: + * - the NQN of the host (i.e., NQNh) not including the null terminator; + * - a space character; + * - the NQN of the NVM subsystem (i.e., NQNc) not including the null + * terminator; + * - a space character; and + * - the seventeen ASCII characters "NVMe-over-Fabrics" + * (i.e., <PSK digest> = Base64(HMAC(PSK, NQNh || " " || NQNc || " " || + * "NVMe-over-Fabrics"))). + * The length of the PSK digest depends on the hash function used to compute + * it as follows: + * - If the SHA-256 hash function is used, the resulting PSK digest is 44 + * characters long; or + * - If the SHA-384 hash function is used, the resulting PSK digest is 64 + * characters long. + * + * Returns 0 on success with a valid digest pointer in @ret_digest, or a + * negative error number on failure. 
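The two digest lengths quoted above are just the padded Base64 expansion of the raw HMAC output, four output characters per three input bytes rounded up; a minimal sketch of the arithmetic using the kernel's DIV_ROUND_UP():

/* Illustrative only: Base64 length of the PSK digest for a given HMAC size. */
static size_t example_psk_digest_chars(size_t hmac_bytes)
{
        /*
         * 32-byte HMAC-SHA-256 -> 4 * 11 = 44 characters (one '=' pad)
         * 48-byte HMAC-SHA-384 -> 4 * 16 = 64 characters (no padding)
         */
        return 4 * DIV_ROUND_UP(hmac_bytes, 3);
}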
+ */ +int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len, + char *subsysnqn, char *hostnqn, u8 **ret_digest) +{ + struct crypto_shash *tfm; + SHASH_DESC_ON_STACK(shash, tfm); + u8 *digest, *enc; + const char *hmac_name; + size_t digest_len, hmac_len; + int ret; + + if (WARN_ON(!subsysnqn || !hostnqn)) + return -EINVAL; + + hmac_name = nvme_auth_hmac_name(hmac_id); + if (!hmac_name) { + pr_warn("%s: invalid hash algorithm %d\n", + __func__, hmac_id); + return -EINVAL; + } + + switch (nvme_auth_hmac_hash_len(hmac_id)) { + case 32: + hmac_len = 44; + break; + case 48: + hmac_len = 64; + break; + default: + pr_warn("%s: invalid hash algorithm '%s'\n", + __func__, hmac_name); + return -EINVAL; + } + + enc = kzalloc(hmac_len + 1, GFP_KERNEL); + if (!enc) + return -ENOMEM; + + tfm = crypto_alloc_shash(hmac_name, 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + goto out_free_enc; + } + + digest_len = crypto_shash_digestsize(tfm); + digest = kzalloc(digest_len, GFP_KERNEL); + if (!digest) { + ret = -ENOMEM; + goto out_free_tfm; + } + + shash->tfm = tfm; + ret = crypto_shash_setkey(tfm, psk, psk_len); + if (ret) + goto out_free_digest; + + ret = crypto_shash_init(shash); + if (ret) + goto out_free_digest; + + ret = crypto_shash_update(shash, hostnqn, strlen(hostnqn)); + if (ret) + goto out_free_digest; + + ret = crypto_shash_update(shash, " ", 1); + if (ret) + goto out_free_digest; + + ret = crypto_shash_update(shash, subsysnqn, strlen(subsysnqn)); + if (ret) + goto out_free_digest; + + ret = crypto_shash_update(shash, " NVMe-over-Fabrics", 18); + if (ret) + goto out_free_digest; + + ret = crypto_shash_final(shash, digest); + if (ret) + goto out_free_digest; + + ret = base64_encode(digest, digest_len, enc, true, BASE64_STD); + if (ret < hmac_len) { + ret = -ENOKEY; + goto out_free_digest; + } + *ret_digest = enc; + ret = 0; + +out_free_digest: + kfree_sensitive(digest); +out_free_tfm: + crypto_free_shash(tfm); +out_free_enc: + if (ret) + kfree_sensitive(enc); + + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_generate_digest); + +/** + * hkdf_expand_label - HKDF-Expand-Label (RFC 8846 section 7.1) + * @hmac_tfm: hash context keyed with pseudorandom key + * @label: ASCII label without "tls13 " prefix + * @labellen: length of @label + * @context: context bytes + * @contextlen: length of @context + * @okm: output keying material + * @okmlen: length of @okm + * + * Build the TLS 1.3 HkdfLabel structure and invoke hkdf_expand(). + * + * Returns 0 on success with output keying material stored in @okm, + * or a negative errno value otherwise. 
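Before the implementation below, it may help to see the HkdfLabel byte layout it builds; a minimal sketch, assuming the standard TLS 1.3 structure and the six-character "tls13 " prefix:

/*
 * Illustrative layout of the info blob assembled by hkdf_expand_label():
 *
 *   bytes 0-1 : okmlen as a big-endian u16 (HkdfLabel.Length)
 *   byte  2   : 6 + labellen               (length of "tls13 " plus label)
 *   bytes 3.. : "tls13 " followed by the label, e.g. "tls13 nvme-tls-psk"
 *   next byte : contextlen
 *   rest      : the context bytes
 */
static unsigned int example_hkdf_label_size(unsigned int labellen,
                                            unsigned int contextlen)
{
        return 2 + 1 + 6 + labellen + 1 + contextlen;
}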
+ */ +static int hkdf_expand_label(struct crypto_shash *hmac_tfm, + const u8 *label, unsigned int labellen, + const u8 *context, unsigned int contextlen, + u8 *okm, unsigned int okmlen) +{ + int err; + u8 *info; + unsigned int infolen; + const char *tls13_prefix = "tls13 "; + unsigned int prefixlen = strlen(tls13_prefix); + + if (WARN_ON(labellen > (255 - prefixlen))) + return -EINVAL; + if (WARN_ON(contextlen > 255)) + return -EINVAL; + + infolen = 2 + (1 + prefixlen + labellen) + (1 + contextlen); + info = kzalloc(infolen, GFP_KERNEL); + if (!info) + return -ENOMEM; + + /* HkdfLabel.Length */ + put_unaligned_be16(okmlen, info); + + /* HkdfLabel.Label */ + info[2] = prefixlen + labellen; + memcpy(info + 3, tls13_prefix, prefixlen); + memcpy(info + 3 + prefixlen, label, labellen); + + /* HkdfLabel.Context */ + info[3 + prefixlen + labellen] = contextlen; + memcpy(info + 4 + prefixlen + labellen, context, contextlen); + + err = hkdf_expand(hmac_tfm, info, infolen, okm, okmlen); + kfree_sensitive(info); + return err; +} + +/** + * nvme_auth_derive_tls_psk - Derive TLS PSK + * @hmac_id: Hash function identifier + * @psk: generated input PSK + * @psk_len: size of @psk + * @psk_digest: TLS PSK digest + * @ret_psk: Pointer to the resulting TLS PSK + * + * Derive a TLS PSK as specified in TP8018 Section 3.6.1.3: + * TLS PSK and PSK identity Derivation + * + * The TLS PSK shall be derived as follows from an input PSK + * (i.e., either a retained PSK or a generated PSK) and a PSK + * identity using the HKDF-Extract and HKDF-Expand-Label operations + * (refer to RFC 5869 and RFC 8446) where the hash function is the + * one specified by the hash specifier of the PSK identity: + * 1. PRK = HKDF-Extract(0, Input PSK); and + * 2. TLS PSK = HKDF-Expand-Label(PRK, "nvme-tls-psk", PskIdentityContext, L), + * where PskIdentityContext is the hash identifier indicated in + * the PSK identity concatenated to a space character and to the + * Base64 PSK digest (i.e., "<hash> <PSK digest>") and L is the + * output size in bytes of the hash function (i.e., 32 for SHA-256 + * and 48 for SHA-384). + * + * Returns 0 on success with a valid psk pointer in @ret_psk or a negative + * error number otherwise. 
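The context string mentioned in step 2 above doubles as the tail of the PSK identity; a minimal sketch of both strings, assuming SHA-256's hash identifier 01 and reusing the format strings used by nvme_auth_derive_tls_psk() and nvme_tls_psk_refresh() further down in this diff:

/*
 * Illustrative sketch: the PskIdentityContext handed to HKDF-Expand-Label
 * and the PSK identity under which the derived TLS PSK is inserted into
 * the keyring. With SHA-256 both strings start with "01 " and "NVMe1G01 ".
 */
static void example_psk_identity(u8 hmac_id, const char *hostnqn,
                                 const char *subsysnqn, const char *psk_digest)
{
        char *ctx, *identity;

        ctx = kasprintf(GFP_KERNEL, "%02d %s", hmac_id, psk_digest);
        identity = kasprintf(GFP_KERNEL, "NVMe1G%02d %s %s %s",
                             hmac_id, hostnqn, subsysnqn, psk_digest);

        pr_debug("context '%s' identity '%s'\n", ctx, identity);
        kfree(identity);
        kfree(ctx);
}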
+ */ +int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len, + u8 *psk_digest, u8 **ret_psk) +{ + struct crypto_shash *hmac_tfm; + const char *hmac_name; + const char *label = "nvme-tls-psk"; + static const char default_salt[HKDF_MAX_HASHLEN]; + size_t prk_len; + const char *ctx; + unsigned char *prk, *tls_key; + int ret; + + hmac_name = nvme_auth_hmac_name(hmac_id); + if (!hmac_name) { + pr_warn("%s: invalid hash algorithm %d\n", + __func__, hmac_id); + return -EINVAL; + } + if (hmac_id == NVME_AUTH_HASH_SHA512) { + pr_warn("%s: unsupported hash algorithm %s\n", + __func__, hmac_name); + return -EINVAL; + } + + hmac_tfm = crypto_alloc_shash(hmac_name, 0, 0); + if (IS_ERR(hmac_tfm)) + return PTR_ERR(hmac_tfm); + + prk_len = crypto_shash_digestsize(hmac_tfm); + prk = kzalloc(prk_len, GFP_KERNEL); + if (!prk) { + ret = -ENOMEM; + goto out_free_shash; + } + + if (WARN_ON(prk_len > HKDF_MAX_HASHLEN)) { + ret = -EINVAL; + goto out_free_prk; + } + ret = hkdf_extract(hmac_tfm, psk, psk_len, + default_salt, prk_len, prk); + if (ret) + goto out_free_prk; + + ret = crypto_shash_setkey(hmac_tfm, prk, prk_len); + if (ret) + goto out_free_prk; + + ctx = kasprintf(GFP_KERNEL, "%02d %s", hmac_id, psk_digest); + if (!ctx) { + ret = -ENOMEM; + goto out_free_prk; + } + + tls_key = kzalloc(psk_len, GFP_KERNEL); + if (!tls_key) { + ret = -ENOMEM; + goto out_free_ctx; + } + ret = hkdf_expand_label(hmac_tfm, + label, strlen(label), + ctx, strlen(ctx), + tls_key, psk_len); + if (ret) { + kfree(tls_key); + goto out_free_ctx; + } + *ret_psk = tls_key; + +out_free_ctx: + kfree(ctx); +out_free_prk: + kfree(prk); +out_free_shash: + crypto_free_shash(hmac_tfm); + + return ret; +} +EXPORT_SYMBOL_GPL(nvme_auth_derive_tls_psk); + MODULE_DESCRIPTION("NVMe Authentication framework"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/nvme/common/keyring.c b/drivers/nvme/common/keyring.c index 6f7e7a8fa5ae..32d16c53133b 100644 --- a/drivers/nvme/common/keyring.c +++ b/drivers/nvme/common/keyring.c @@ -5,7 +5,6 @@ #include <linux/module.h> #include <linux/seq_file.h> -#include <linux/key.h> #include <linux/key-type.h> #include <keys/user-type.h> #include <linux/nvme.h> @@ -20,6 +19,28 @@ key_serial_t nvme_keyring_id(void) } EXPORT_SYMBOL_GPL(nvme_keyring_id); +static bool nvme_tls_psk_revoked(struct key *psk) +{ + return test_bit(KEY_FLAG_REVOKED, &psk->flags) || + test_bit(KEY_FLAG_INVALIDATED, &psk->flags); +} + +struct key *nvme_tls_key_lookup(key_serial_t key_id) +{ + struct key *key = key_lookup(key_id); + + if (IS_ERR(key)) { + pr_err("key id %08x not found\n", key_id); + return key; + } + if (nvme_tls_psk_revoked(key)) { + pr_err("key id %08x revoked\n", key_id); + return ERR_PTR(-EKEYREVOKED); + } + return key; +} +EXPORT_SYMBOL_GPL(nvme_tls_key_lookup); + static void nvme_tls_psk_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); @@ -36,14 +57,12 @@ static bool nvme_tls_psk_match(const struct key *key, pr_debug("%s: no key description\n", __func__); return false; } - match_len = strlen(key->description); - pr_debug("%s: id %s len %zd\n", __func__, key->description, match_len); - if (!match_data->raw_data) { pr_debug("%s: no match data\n", __func__); return false; } match_id = match_data->raw_data; + match_len = strlen(match_id); pr_debug("%s: match '%s' '%s' len %zd\n", __func__, match_id, key->description, match_len); return !memcmp(key->description, match_id, match_len); @@ -71,7 +90,7 @@ static struct key_type nvme_tls_psk_key_type = { static struct key 
*nvme_tls_psk_lookup(struct key *keyring, const char *hostnqn, const char *subnqn, - int hmac, bool generated) + u8 hmac, u8 psk_ver, bool generated) { char *identity; size_t identity_len = (NVMF_NQN_SIZE) * 2 + 11; @@ -82,8 +101,8 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring, if (!identity) return ERR_PTR(-ENOMEM); - snprintf(identity, identity_len, "NVMe0%c%02d %s %s", - generated ? 'G' : 'R', hmac, hostnqn, subnqn); + snprintf(identity, identity_len, "NVMe%u%c%02u %s %s", + psk_ver, generated ? 'G' : 'R', hmac, hostnqn, subnqn); if (!keyring) keyring = nvme_keyring; @@ -104,24 +123,105 @@ static struct key *nvme_tls_psk_lookup(struct key *keyring, return key_ref_to_ptr(keyref); } +/** + * nvme_tls_psk_refresh - Refresh TLS PSK + * @keyring: Keyring holding the TLS PSK + * @hostnqn: Host NQN to use + * @subnqn: Subsystem NQN to use + * @hmac_id: Hash function identifier + * @data: TLS PSK key material + * @data_len: Length of @data + * @digest: TLS PSK digest + * + * Refresh a generated version 1 TLS PSK with the identity generated + * from @hmac_id, @hostnqn, @subnqn, and @digest in the keyring given + * by @keyring. + * + * Returns the updated key success or an error pointer otherwise. + */ +struct key *nvme_tls_psk_refresh(struct key *keyring, + const char *hostnqn, const char *subnqn, u8 hmac_id, + u8 *data, size_t data_len, const char *digest) +{ + key_perm_t keyperm = + KEY_POS_SEARCH | KEY_POS_VIEW | KEY_POS_READ | + KEY_POS_WRITE | KEY_POS_LINK | KEY_POS_SETATTR | + KEY_USR_SEARCH | KEY_USR_VIEW | KEY_USR_READ; + char *identity; + key_ref_t keyref; + key_serial_t keyring_id; + struct key *key; + + if (!hostnqn || !subnqn || !data || !data_len) + return ERR_PTR(-EINVAL); + + identity = kasprintf(GFP_KERNEL, "NVMe1G%02d %s %s %s", + hmac_id, hostnqn, subnqn, digest); + if (!identity) + return ERR_PTR(-ENOMEM); + + if (!keyring) + keyring = nvme_keyring; + keyring_id = key_serial(keyring); + pr_debug("keyring %x refresh tls psk '%s'\n", + keyring_id, identity); + keyref = key_create_or_update(make_key_ref(keyring, true), + "psk", identity, data, data_len, + keyperm, KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_BUILT_IN | + KEY_ALLOC_BYPASS_RESTRICTION); + if (IS_ERR(keyref)) { + pr_debug("refresh tls psk '%s' failed, error %ld\n", + identity, PTR_ERR(keyref)); + kfree(identity); + return ERR_PTR(-ENOKEY); + } + kfree(identity); + /* + * Set the default timeout to 1 hour + * as suggested in TP8018. + */ + key = key_ref_to_ptr(keyref); + key_set_timeout(key, 3600); + return key; +} +EXPORT_SYMBOL_GPL(nvme_tls_psk_refresh); + /* * NVMe PSK priority list * - * 'Retained' PSKs (ie 'generated == false') - * should be preferred to 'generated' PSKs, - * and SHA-384 should be preferred to SHA-256. + * 'Retained' PSKs (ie 'generated == false') should be preferred to 'generated' + * PSKs, PSKs with hash (psk_ver 1) should be preferred to PSKs without hash + * (psk_ver 0), and SHA-384 should be preferred to SHA-256. 
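Put together with the priority table that follows, the default lookup probes identities with these prefixes in order; a sketch assuming the usual cipher identifiers (SHA-256 = 01, SHA-384 = 02), with the host and subsystem NQNs appended after each prefix:

/*
 * Illustrative only: identity prefixes probed by nvme_tls_psk_default(),
 * highest priority first.
 */
static const char * const example_psk_identity_prefixes[] = {
        "NVMe1R02", "NVMe1R01",  /* retained, with PSK digest (version 1) */
        "NVMe0R02", "NVMe0R01",  /* retained, no digest (version 0) */
        "NVMe1G02", "NVMe1G01",  /* generated, version 1 */
        "NVMe0G02", "NVMe0G01",  /* generated, version 0 */
};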
*/ static struct nvme_tls_psk_priority_list { bool generated; + u8 psk_ver; enum nvme_tcp_tls_cipher cipher; } nvme_tls_psk_prio[] = { { .generated = false, + .psk_ver = 1, + .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, + { .generated = false, + .psk_ver = 1, + .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, + { .generated = false, + .psk_ver = 0, .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, { .generated = false, + .psk_ver = 0, + .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, + { .generated = true, + .psk_ver = 1, + .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, + { .generated = true, + .psk_ver = 1, .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, { .generated = true, + .psk_ver = 0, .cipher = NVME_TCP_TLS_CIPHER_SHA384, }, { .generated = true, + .psk_ver = 0, .cipher = NVME_TCP_TLS_CIPHER_SHA256, }, }; @@ -137,10 +237,11 @@ key_serial_t nvme_tls_psk_default(struct key *keyring, for (prio = 0; prio < ARRAY_SIZE(nvme_tls_psk_prio); prio++) { bool generated = nvme_tls_psk_prio[prio].generated; + u8 ver = nvme_tls_psk_prio[prio].psk_ver; enum nvme_tcp_tls_cipher cipher = nvme_tls_psk_prio[prio].cipher; tls_key = nvme_tls_psk_lookup(keyring, hostnqn, subnqn, - cipher, generated); + cipher, ver, generated); if (!IS_ERR(tls_key)) { tls_key_id = tls_key->serial; key_put(tls_key); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index b309c8be720f..31974c7dd20c 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only config NVME_CORE tristate - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY config BLK_DEV_NVME tristate "NVM Express block device" @@ -19,10 +18,15 @@ config NVME_MULTIPATH bool "NVMe multipath support" depends on NVME_CORE help - This option enables support for multipath access to NVMe - subsystems. If this option is enabled only a single - /dev/nvmeXnY device will show up for each NVMe namespace, - even if it is accessible through multiple controllers. + This option controls support for multipath access to NVMe + subsystems. If this option is enabled support for NVMe multipath + access is included in the kernel. If this option is disabled support + for NVMe multipath access is excluded from the kernel. When this + option is disabled each controller/namespace receives its + own /dev/nvmeXnY device entry and NVMe multipath access is + not supported. + + If unsure, say Y. config NVME_VERBOSE_ERRORS bool "NVMe verbose error reporting" @@ -42,6 +46,7 @@ config NVME_HWMON config NVME_FABRICS select NVME_CORE + select NVME_KEYRING if NVME_TCP_TLS tristate config NVME_RDMA @@ -79,9 +84,9 @@ config NVME_TCP tristate "NVM Express over Fabrics TCP host driver" depends on INET depends on BLOCK + select CRC32 + select NET_CRC32C select NVME_FABRICS - select CRYPTO - select CRYPTO_CRC32C help This provides support for the NVMe over Fabrics protocol using the TCP transport. This allows you to use remote block devices @@ -95,13 +100,13 @@ config NVME_TCP config NVME_TCP_TLS bool "NVMe over Fabrics TCP TLS encryption support" depends on NVME_TCP - select NVME_KEYRING select NET_HANDSHAKE select KEYS + select TLS help Enables TLS encryption for NVMe TCP using the netlink handshake API. - The TLS handshake daemon is availble at + The TLS handshake daemon is available at https://github.com/oracle/ktls-utils. If unsure, say N. 
@@ -110,6 +115,7 @@ config NVME_HOST_AUTH bool "NVMe over Fabrics In-Band Authentication in host side" depends on NVME_CORE select NVME_AUTH + select NVME_KEYRING help This provides support for NVMe over Fabrics In-Band Authentication in host side. diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 0cfa39361d3b..15b3d07f8ccd 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -35,7 +35,6 @@ #include "nvme.h" #define APPLE_ANS_BOOT_TIMEOUT USEC_PER_SEC -#define APPLE_ANS_MAX_QUEUE_DEPTH 64 #define APPLE_ANS_COPROC_CPU_CONTROL 0x44 #define APPLE_ANS_COPROC_CPU_CONTROL_RUN BIT(4) @@ -75,6 +74,8 @@ #define APPLE_NVME_AQ_DEPTH 2 #define APPLE_NVME_AQ_MQ_TAG_DEPTH (APPLE_NVME_AQ_DEPTH - 1) +#define APPLE_NVME_IOSQES 7 + /* * These can be higher, but we need to ensure that any command doesn't * require an sg allocation that needs more than a page of data. @@ -142,6 +143,7 @@ struct apple_nvme_queue { u32 __iomem *sq_db; u32 __iomem *cq_db; + u16 sq_tail; u16 cq_head; u8 cq_phase; @@ -166,11 +168,17 @@ struct apple_nvme_iod { struct scatterlist *sg; }; +struct apple_nvme_hw { + bool has_lsq_nvmmu; + u32 max_queue_depth; +}; + struct apple_nvme { struct device *dev; void __iomem *mmio_coproc; void __iomem *mmio_nvme; + const struct apple_nvme_hw *hw; struct device **pd_dev; struct device_link **pd_link; @@ -215,13 +223,15 @@ static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q) static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q) { - if (q->is_adminq) + struct apple_nvme *anv = queue_to_apple_nvme(q); + + if (q->is_adminq && anv->hw->has_lsq_nvmmu) return APPLE_NVME_AQ_DEPTH; - return APPLE_ANS_MAX_QUEUE_DEPTH; + return anv->hw->max_queue_depth; } -static void apple_nvme_rtkit_crashed(void *cookie) +static void apple_nvme_rtkit_crashed(void *cookie, const void *crashlog, size_t crashlog_size) { struct apple_nvme *anv = cookie; @@ -280,7 +290,28 @@ static void apple_nvmmu_inval(struct apple_nvme_queue *q, unsigned int tag) "NVMMU TCB invalidation failed\n"); } -static void apple_nvme_submit_cmd(struct apple_nvme_queue *q, +static void apple_nvme_submit_cmd_t8015(struct apple_nvme_queue *q, + struct nvme_command *cmd) +{ + struct apple_nvme *anv = queue_to_apple_nvme(q); + + spin_lock_irq(&anv->lock); + + if (q->is_adminq) + memcpy(&q->sqes[q->sq_tail], cmd, sizeof(*cmd)); + else + memcpy((void *)q->sqes + (q->sq_tail << APPLE_NVME_IOSQES), + cmd, sizeof(*cmd)); + + if (++q->sq_tail == anv->hw->max_queue_depth) + q->sq_tail = 0; + + writel(q->sq_tail, q->sq_db); + spin_unlock_irq(&anv->lock); +} + + +static void apple_nvme_submit_cmd_t8103(struct apple_nvme_queue *q, struct nvme_command *cmd) { struct apple_nvme *anv = queue_to_apple_nvme(q); @@ -301,8 +332,8 @@ static void apple_nvme_submit_cmd(struct apple_nvme_queue *q, memcpy(&q->sqes[tag], cmd, sizeof(*cmd)); /* - * This lock here doesn't make much sense at a first glace but - * removing it will result in occasional missed completetion + * This lock here doesn't make much sense at a first glance but + * removing it will result in occasional missed completion * interrupts even though the commands still appear on the CQ. 
* It's unclear why this happens but our best guess is that * there is a bug in the firmware triggered when a new command @@ -525,7 +556,7 @@ static blk_status_t apple_nvme_map_data(struct apple_nvme *anv, if (!iod->sg) return BLK_STS_RESOURCE; sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); - iod->nents = blk_rq_map_sg(req->q, req, iod->sg); + iod->nents = blk_rq_map_sg(req, iod->sg); if (!iod->nents) goto out_free_sg; @@ -590,7 +621,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, __u16 command_id = READ_ONCE(cqe->command_id); struct request *req; - apple_nvmmu_inval(q, command_id); + if (anv->hw->has_lsq_nvmmu) + apple_nvmmu_inval(q, command_id); req = nvme_find_rq(apple_nvme_queue_tagset(anv, q), command_id); if (unlikely(!req)) { @@ -599,7 +631,8 @@ static inline void apple_nvme_handle_cqe(struct apple_nvme_queue *q, } if (!nvme_try_complete_req(req, cqe->status, cqe->result) && - !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, + !blk_mq_add_to_batch(req, iob, + nvme_req(req)->status != NVME_SC_SUCCESS, apple_nvme_complete_batch)) apple_nvme_complete_rq(req); } @@ -649,7 +682,7 @@ static bool apple_nvme_handle_cq(struct apple_nvme_queue *q, bool force) found = apple_nvme_poll_cq(q, &iob); - if (!rq_list_empty(iob.req_list)) + if (!rq_list_empty(&iob.req_list)) apple_nvme_complete_batch(&iob); return found; @@ -684,7 +717,7 @@ static int apple_nvme_create_cq(struct apple_nvme *anv) c.create_cq.opcode = nvme_admin_create_cq; c.create_cq.prp1 = cpu_to_le64(anv->ioq.cq_dma_addr); c.create_cq.cqid = cpu_to_le16(1); - c.create_cq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_cq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1); c.create_cq.cq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED); c.create_cq.irq_vector = cpu_to_le16(0); @@ -712,7 +745,7 @@ static int apple_nvme_create_sq(struct apple_nvme *anv) c.create_sq.opcode = nvme_admin_create_sq; c.create_sq.prp1 = cpu_to_le64(anv->ioq.sq_dma_addr); c.create_sq.sqid = cpu_to_le16(1); - c.create_sq.qsize = cpu_to_le16(APPLE_ANS_MAX_QUEUE_DEPTH - 1); + c.create_sq.qsize = cpu_to_le16(anv->hw->max_queue_depth - 1); c.create_sq.sq_flags = cpu_to_le16(NVME_QUEUE_PHYS_CONTIG); c.create_sq.cqid = cpu_to_le16(1); @@ -764,7 +797,12 @@ static blk_status_t apple_nvme_queue_rq(struct blk_mq_hw_ctx *hctx, } nvme_start_request(req); - apple_nvme_submit_cmd(q, cmnd); + + if (anv->hw->has_lsq_nvmmu) + apple_nvme_submit_cmd_t8103(q, cmnd); + else + apple_nvme_submit_cmd_t8015(q, cmnd); + return BLK_STS_OK; out_free_cmd: @@ -969,11 +1007,13 @@ static const struct blk_mq_ops apple_nvme_mq_ops = { static void apple_nvme_init_queue(struct apple_nvme_queue *q) { unsigned int depth = apple_nvme_queue_depth(q); + struct apple_nvme *anv = queue_to_apple_nvme(q); q->cq_head = 0; q->cq_phase = 1; - memset(q->tcbs, 0, - APPLE_ANS_MAX_QUEUE_DEPTH * sizeof(struct apple_nvmmu_tcb)); + if (anv->hw->has_lsq_nvmmu) + memset(q->tcbs, 0, anv->hw->max_queue_depth + * sizeof(struct apple_nvmmu_tcb)); memset(q->cqes, 0, depth * sizeof(struct nvme_completion)); WRITE_ONCE(q->enabled, true); wmb(); /* ensure the first interrupt sees the initialization */ @@ -1011,25 +1051,37 @@ static void apple_nvme_reset_work(struct work_struct *work) ret = apple_rtkit_shutdown(anv->rtk); if (ret) goto out; + + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); } - writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + /* + * Only do the soft-reset if the CPU is not running, which means either we + * or the 
previous stage shut it down cleanly. + */ + if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) & + APPLE_ANS_COPROC_CPU_CONTROL_RUN)) { - ret = reset_control_assert(anv->reset); - if (ret) - goto out; + ret = reset_control_assert(anv->reset); + if (ret) + goto out; - ret = apple_rtkit_reinit(anv->rtk); - if (ret) - goto out; + ret = apple_rtkit_reinit(anv->rtk); + if (ret) + goto out; - ret = reset_control_deassert(anv->reset); - if (ret) - goto out; + ret = reset_control_deassert(anv->reset); + if (ret) + goto out; + + writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN, + anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + + ret = apple_rtkit_boot(anv->rtk); + } else { + ret = apple_rtkit_wake(anv->rtk); + } - writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN, - anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); - ret = apple_rtkit_boot(anv->rtk); if (ret) { dev_err(anv->dev, "ANS did not boot"); goto out; @@ -1056,49 +1108,55 @@ static void apple_nvme_reset_work(struct work_struct *work) dma_set_max_seg_size(anv->dev, 0xffffffff); - /* - * Enable NVMMU and linear submission queues. - * While we could keep those disabled and pretend this is slightly - * more common NVMe controller we'd still need some quirks (e.g. - * sq entries will be 128 bytes) and Apple might drop support for - * that mode in the future. - */ - writel(APPLE_ANS_LINEAR_SQ_EN, - anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL); + if (anv->hw->has_lsq_nvmmu) { + /* + * Enable NVMMU and linear submission queues which is required + * since T6000. + */ + writel(APPLE_ANS_LINEAR_SQ_EN, + anv->mmio_nvme + APPLE_ANS_LINEAR_SQ_CTRL); - /* Allow as many pending command as possible for both queues */ - writel(APPLE_ANS_MAX_QUEUE_DEPTH | (APPLE_ANS_MAX_QUEUE_DEPTH << 16), - anv->mmio_nvme + APPLE_ANS_MAX_PEND_CMDS_CTRL); + /* Allow as many pending command as possible for both queues */ + writel(anv->hw->max_queue_depth + | (anv->hw->max_queue_depth << 16), anv->mmio_nvme + + APPLE_ANS_MAX_PEND_CMDS_CTRL); - /* Setup the NVMMU for the maximum admin and IO queue depth */ - writel(APPLE_ANS_MAX_QUEUE_DEPTH - 1, - anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS); + /* Setup the NVMMU for the maximum admin and IO queue depth */ + writel(anv->hw->max_queue_depth - 1, + anv->mmio_nvme + APPLE_NVMMU_NUM_TCBS); - /* - * This is probably a chicken bit: without it all commands where any PRP - * is set to zero (including those that don't use that field) fail and - * the co-processor complains about "completed with err BAD_CMD-" or - * a "NULL_PRP_PTR_ERR" in the syslog - */ - writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) & - ~APPLE_ANS_PRP_NULL_CHECK, - anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL); + /* + * This is probably a chicken bit: without it all commands + * where any PRP is set to zero (including those that don't use + * that field) fail and the co-processor complains about + * "completed with err BAD_CMD-" or a "NULL_PRP_PTR_ERR" in the + * syslog + */ + writel(readl(anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL) & + ~APPLE_ANS_PRP_NULL_CHECK, + anv->mmio_nvme + APPLE_ANS_UNKNOWN_CTRL); + } /* Setup the admin queue */ - aqa = APPLE_NVME_AQ_DEPTH - 1; + if (anv->hw->has_lsq_nvmmu) + aqa = APPLE_NVME_AQ_DEPTH - 1; + else + aqa = anv->hw->max_queue_depth - 1; aqa |= aqa << 16; writel(aqa, anv->mmio_nvme + NVME_REG_AQA); writeq(anv->adminq.sq_dma_addr, anv->mmio_nvme + NVME_REG_ASQ); writeq(anv->adminq.cq_dma_addr, anv->mmio_nvme + NVME_REG_ACQ); - /* Setup NVMMU for both queues */ - writeq(anv->adminq.tcb_dma_addr, - anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE); - 
writeq(anv->ioq.tcb_dma_addr, - anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE); + if (anv->hw->has_lsq_nvmmu) { + /* Setup NVMMU for both queues */ + writeq(anv->adminq.tcb_dma_addr, + anv->mmio_nvme + APPLE_NVMMU_ASQ_TCB_BASE); + writeq(anv->ioq.tcb_dma_addr, + anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE); + } anv->ctrl.sqsize = - APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */ + anv->hw->max_queue_depth - 1; /* 0's based queue depth */ anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP); dev_dbg(anv->dev, "Enabling controller now"); @@ -1225,6 +1283,7 @@ static const struct nvme_ctrl_ops nvme_ctrl_ops = { .reg_read64 = apple_nvme_reg_read64, .free_ctrl = apple_nvme_free_ctrl, .get_address = apple_nvme_get_address, + .get_virt_boundary = nvme_get_virt_boundary, }; static void apple_nvme_async_probe(void *data, async_cookie_t cookie) @@ -1251,7 +1310,6 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv) anv->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; anv->admin_tagset.numa_node = NUMA_NO_NODE; anv->admin_tagset.cmd_size = sizeof(struct apple_nvme_iod); - anv->admin_tagset.flags = BLK_MQ_F_NO_SCHED; anv->admin_tagset.driver_data = &anv->adminq; ret = blk_mq_alloc_tag_set(&anv->admin_tagset); @@ -1270,12 +1328,12 @@ static int apple_nvme_alloc_tagsets(struct apple_nvme *anv) * both queues. The admin queue gets the first APPLE_NVME_AQ_DEPTH which * must be marked as reserved in the IO queue. */ - anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; - anv->tagset.queue_depth = APPLE_ANS_MAX_QUEUE_DEPTH - 1; + if (anv->hw->has_lsq_nvmmu) + anv->tagset.reserved_tags = APPLE_NVME_AQ_DEPTH; + anv->tagset.queue_depth = anv->hw->max_queue_depth - 1; anv->tagset.timeout = NVME_IO_TIMEOUT; anv->tagset.numa_node = NUMA_NO_NODE; anv->tagset.cmd_size = sizeof(struct apple_nvme_iod); - anv->tagset.flags = BLK_MQ_F_SHOULD_MERGE; anv->tagset.driver_data = &anv->ioq; ret = blk_mq_alloc_tag_set(&anv->tagset); @@ -1296,6 +1354,7 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv, struct apple_nvme_queue *q) { unsigned int depth = apple_nvme_queue_depth(q); + size_t iosq_size; q->cqes = dmam_alloc_coherent(anv->dev, depth * sizeof(struct nvme_completion), @@ -1303,22 +1362,28 @@ static int apple_nvme_queue_alloc(struct apple_nvme *anv, if (!q->cqes) return -ENOMEM; - q->sqes = dmam_alloc_coherent(anv->dev, - depth * sizeof(struct nvme_command), + if (anv->hw->has_lsq_nvmmu) + iosq_size = depth * sizeof(struct nvme_command); + else + iosq_size = depth << APPLE_NVME_IOSQES; + + q->sqes = dmam_alloc_coherent(anv->dev, iosq_size, &q->sq_dma_addr, GFP_KERNEL); if (!q->sqes) return -ENOMEM; - /* - * We need the maximum queue depth here because the NVMMU only has a - * single depth configuration shared between both queues. - */ - q->tcbs = dmam_alloc_coherent(anv->dev, - APPLE_ANS_MAX_QUEUE_DEPTH * - sizeof(struct apple_nvmmu_tcb), - &q->tcb_dma_addr, GFP_KERNEL); - if (!q->tcbs) - return -ENOMEM; + if (anv->hw->has_lsq_nvmmu) { + /* + * We need the maximum queue depth here because the NVMMU only + * has a single depth configuration shared between both queues. 
+ */ + q->tcbs = dmam_alloc_coherent(anv->dev, + anv->hw->max_queue_depth * + sizeof(struct apple_nvmmu_tcb), + &q->tcb_dma_addr, GFP_KERNEL); + if (!q->tcbs) + return -ENOMEM; + } /* * initialize phase to make sure the allocated and empty memory @@ -1388,7 +1453,7 @@ static void devm_apple_nvme_mempool_destroy(void *data) mempool_destroy(data); } -static int apple_nvme_probe(struct platform_device *pdev) +static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct apple_nvme *anv; @@ -1396,12 +1461,18 @@ static int apple_nvme_probe(struct platform_device *pdev) anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL); if (!anv) - return -ENOMEM; + return ERR_PTR(-ENOMEM); anv->dev = get_device(dev); anv->adminq.is_adminq = true; platform_set_drvdata(pdev, anv); + anv->hw = of_device_get_match_data(&pdev->dev); + if (!anv->hw) { + ret = -ENODEV; + goto put_dev; + } + ret = apple_nvme_attach_genpd(anv); if (ret < 0) { dev_err_probe(dev, ret, "Failed to attach power domains"); @@ -1433,10 +1504,17 @@ static int apple_nvme_probe(struct platform_device *pdev) goto put_dev; } - anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; - anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; - anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; - anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; + if (anv->hw->has_lsq_nvmmu) { + anv->adminq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_ASQ_DB; + anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; + anv->ioq.sq_db = anv->mmio_nvme + APPLE_ANS_LINEAR_IOSQ_DB; + anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; + } else { + anv->adminq.sq_db = anv->mmio_nvme + NVME_REG_DBS; + anv->adminq.cq_db = anv->mmio_nvme + APPLE_ANS_ACQ_DB; + anv->ioq.sq_db = anv->mmio_nvme + NVME_REG_DBS + 8; + anv->ioq.cq_db = anv->mmio_nvme + APPLE_ANS_IOCQ_DB; + } anv->sart = devm_apple_sart_get(dev); if (IS_ERR(anv->sart)) { @@ -1516,10 +1594,31 @@ static int apple_nvme_probe(struct platform_device *pdev) goto put_dev; } + return anv; +put_dev: + apple_nvme_detach_genpd(anv); + put_device(anv->dev); + return ERR_PTR(ret); +} + +static int apple_nvme_probe(struct platform_device *pdev) +{ + struct apple_nvme *anv; + int ret; + + anv = apple_nvme_alloc(pdev); + if (IS_ERR(anv)) + return PTR_ERR(anv); + + ret = nvme_add_ctrl(&anv->ctrl); + if (ret) + goto out_put_ctrl; + anv->ctrl.admin_q = blk_mq_alloc_queue(&anv->admin_tagset, NULL, NULL); if (IS_ERR(anv->ctrl.admin_q)) { ret = -ENOMEM; - goto put_dev; + anv->ctrl.admin_q = NULL; + goto out_uninit_ctrl; } nvme_reset_ctrl(&anv->ctrl); @@ -1527,8 +1626,11 @@ static int apple_nvme_probe(struct platform_device *pdev) return 0; -put_dev: - put_device(anv->dev); +out_uninit_ctrl: + nvme_uninit_ctrl(&anv->ctrl); +out_put_ctrl: + nvme_put_ctrl(&anv->ctrl); + apple_nvme_detach_genpd(anv); return ret; } @@ -1543,9 +1645,12 @@ static void apple_nvme_remove(struct platform_device *pdev) apple_nvme_disable(anv, true); nvme_uninit_ctrl(&anv->ctrl); - if (apple_rtkit_is_running(anv->rtk)) + if (apple_rtkit_is_running(anv->rtk)) { apple_rtkit_shutdown(anv->rtk); + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + } + apple_nvme_detach_genpd(anv); } @@ -1554,8 +1659,11 @@ static void apple_nvme_shutdown(struct platform_device *pdev) struct apple_nvme *anv = platform_get_drvdata(pdev); apple_nvme_disable(anv, true); - if (apple_rtkit_is_running(anv->rtk)) + if (apple_rtkit_is_running(anv->rtk)) { apple_rtkit_shutdown(anv->rtk); + + writel(0, anv->mmio_coproc + 
APPLE_ANS_COPROC_CPU_CONTROL); + } } static int apple_nvme_resume(struct device *dev) @@ -1572,10 +1680,11 @@ static int apple_nvme_suspend(struct device *dev) apple_nvme_disable(anv, true); - if (apple_rtkit_is_running(anv->rtk)) + if (apple_rtkit_is_running(anv->rtk)) { ret = apple_rtkit_shutdown(anv->rtk); - writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL); + } return ret; } @@ -1583,8 +1692,19 @@ static int apple_nvme_suspend(struct device *dev) static DEFINE_SIMPLE_DEV_PM_OPS(apple_nvme_pm_ops, apple_nvme_suspend, apple_nvme_resume); +static const struct apple_nvme_hw apple_nvme_t8015_hw = { + .has_lsq_nvmmu = false, + .max_queue_depth = 16, +}; + +static const struct apple_nvme_hw apple_nvme_t8103_hw = { + .has_lsq_nvmmu = true, + .max_queue_depth = 64, +}; + static const struct of_device_id apple_nvme_of_match[] = { - { .compatible = "apple,nvme-ans2" }, + { .compatible = "apple,t8015-nvme-ans2", .data = &apple_nvme_t8015_hw }, + { .compatible = "apple,nvme-ans2", .data = &apple_nvme_t8103_hw }, {}, }; MODULE_DEVICE_TABLE(of, apple_nvme_of_match); @@ -1596,7 +1716,7 @@ static struct platform_driver apple_nvme_driver = { .pm = pm_sleep_ptr(&apple_nvme_pm_ops), }, .probe = apple_nvme_probe, - .remove_new = apple_nvme_remove, + .remove = apple_nvme_remove, .shutdown = apple_nvme_shutdown, }; module_platform_driver(apple_nvme_driver); diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c index 371e14f0a203..8f3ccb317e4d 100644 --- a/drivers/nvme/host/auth.c +++ b/drivers/nvme/host/auth.c @@ -6,12 +6,13 @@ #include <linux/crc32.h> #include <linux/base64.h> #include <linux/prandom.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <crypto/hash.h> #include <crypto/dh.h> #include "nvme.h" #include "fabrics.h" #include <linux/nvme-auth.h> +#include <linux/nvme-keyring.h> #define CHAP_BUF_SIZE 4096 static struct kmem_cache *nvme_chap_buf_cache; @@ -30,10 +31,12 @@ struct nvme_dhchap_queue_context { u32 s1; u32 s2; bool bi_directional; + bool authenticated; u16 transaction; u8 status; u8 dhgroup_id; u8 hash_id; + u8 sc_c; size_t hash_len; u8 c1[64]; u8 c2[64]; @@ -131,7 +134,13 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, data->auth_type = NVME_AUTH_COMMON_MESSAGES; data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; data->t_id = cpu_to_le16(chap->transaction); - data->sc_c = 0; /* No secure channel concatenation */ + if (ctrl->opts->concat && chap->qid == 0) { + if (ctrl->opts->tls_key) + data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK; + else + data->sc_c = NVME_AUTH_SECP_NEWTLSPSK; + } else + data->sc_c = NVME_AUTH_SECP_NOSC; data->napd = 1; data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID; data->auth_protocol[0].dhchap.halen = 3; @@ -146,6 +155,8 @@ static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144; data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192; + chap->sc_c = data->sc_c; + return size; } @@ -311,8 +322,9 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, data->hl = chap->hash_len; data->dhvlen = cpu_to_le16(chap->host_key_len); memcpy(data->rval, chap->response, chap->hash_len); - if (ctrl->ctrl_key) { + if (ctrl->ctrl_key) chap->bi_directional = true; + if (ctrl->ctrl_key || ctrl->opts->concat) { get_random_bytes(chap->c2, chap->hash_len); data->cvalid = 1; memcpy(data->rval + chap->hash_len, chap->c2, @@ -322,7 +334,11 @@ 
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, } else { memset(chap->c2, 0, chap->hash_len); } - chap->s2 = nvme_auth_get_seqnum(); + if (ctrl->opts->concat) { + chap->s2 = 0; + chap->bi_directional = false; + } else + chap->s2 = nvme_auth_get_seqnum(); data->seqnum = cpu_to_le32(chap->s2); if (chap->host_key_len) { dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n", @@ -476,7 +492,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; - memset(buf, 0, sizeof(buf)); + *buf = chap->sc_c; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; @@ -487,6 +503,7 @@ static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, strlen(ctrl->opts->host->nqn)); if (ret) goto out; + memset(buf, 0, sizeof(buf)); ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; @@ -671,12 +688,99 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap) { nvme_auth_reset_dhchap(chap); + chap->authenticated = false; if (chap->shash_tfm) crypto_free_shash(chap->shash_tfm); if (chap->dh_tfm) crypto_free_kpp(chap->dh_tfm); } +void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl) +{ + dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n", + key_serial(ctrl->opts->tls_key)); + key_revoke(ctrl->opts->tls_key); + key_put(ctrl->opts->tls_key); + ctrl->opts->tls_key = NULL; +} +EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key); + +static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl, + struct nvme_dhchap_queue_context *chap) +{ + u8 *psk, *digest, *tls_psk; + struct key *tls_key; + size_t psk_len; + int ret = 0; + + if (!chap->sess_key) { + dev_warn(ctrl->device, + "%s: qid %d no session key negotiated\n", + __func__, chap->qid); + return -ENOKEY; + } + + if (chap->qid) { + dev_warn(ctrl->device, + "qid %d: secure concatenation not supported on I/O queues\n", + chap->qid); + return -EINVAL; + } + ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key, + chap->sess_key_len, + chap->c1, chap->c2, + chap->hash_len, &psk, &psk_len); + if (ret) { + dev_warn(ctrl->device, + "%s: qid %d failed to generate PSK, error %d\n", + __func__, chap->qid, ret); + return ret; + } + dev_dbg(ctrl->device, + "%s: generated psk %*ph\n", __func__, (int)psk_len, psk); + + ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len, + ctrl->opts->subsysnqn, + ctrl->opts->host->nqn, &digest); + if (ret) { + dev_warn(ctrl->device, + "%s: qid %d failed to generate digest, error %d\n", + __func__, chap->qid, ret); + goto out_free_psk; + } + dev_dbg(ctrl->device, "%s: generated digest %s\n", + __func__, digest); + ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len, + digest, &tls_psk); + if (ret) { + dev_warn(ctrl->device, + "%s: qid %d failed to derive TLS psk, error %d\n", + __func__, chap->qid, ret); + goto out_free_digest; + } + + tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring, + ctrl->opts->host->nqn, + ctrl->opts->subsysnqn, chap->hash_id, + tls_psk, psk_len, digest); + if (IS_ERR(tls_key)) { + ret = PTR_ERR(tls_key); + dev_warn(ctrl->device, + "%s: qid %d failed to insert generated key, error %d\n", + __func__, chap->qid, ret); + tls_key = NULL; + } + kfree_sensitive(tls_psk); + if (ctrl->opts->tls_key) + nvme_auth_revoke_tls_key(ctrl); + ctrl->opts->tls_key = tls_key; +out_free_digest: + kfree_sensitive(digest); +out_free_psk: + kfree_sensitive(psk); + return ret; +} + static void 
nvme_queue_auth_work(struct work_struct *work) { struct nvme_dhchap_queue_context *chap = @@ -833,6 +937,15 @@ static void nvme_queue_auth_work(struct work_struct *work) } if (!ret) { chap->error = 0; + chap->authenticated = true; + if (ctrl->opts->concat && + (ret = nvme_auth_secure_concat(ctrl, chap))) { + dev_warn(ctrl->device, + "%s: qid %d failed to enable secure concatenation\n", + __func__, chap->qid); + chap->error = ret; + chap->authenticated = false; + } return; } @@ -912,15 +1025,23 @@ static void nvme_ctrl_auth_work(struct work_struct *work) "qid 0: authentication failed\n"); return; } + /* + * Only run authentication on the admin queue for secure concatenation. + */ + if (ctrl->opts->concat) + return; for (q = 1; q < ctrl->queue_count; q++) { - ret = nvme_auth_negotiate(ctrl, q); - if (ret) { - dev_warn(ctrl->device, - "qid %d: error %d setting up authentication\n", - q, ret); - break; - } + struct nvme_dhchap_queue_context *chap = + &ctrl->dhchap_ctxs[q]; + /* + * Skip re-authentication if the queue had + * not been authenticated initially. + */ + if (!chap->authenticated) + continue; + cancel_work_sync(&chap->auth_work); + queue_work(nvme_auth_wq, &chap->auth_work); } /* @@ -928,7 +1049,13 @@ static void nvme_ctrl_auth_work(struct work_struct *work) * the controller terminates the connection. */ for (q = 1; q < ctrl->queue_count; q++) { - ret = nvme_auth_wait(ctrl, q); + struct nvme_dhchap_queue_context *chap = + &ctrl->dhchap_ctxs[q]; + if (!chap->authenticated) + continue; + flush_work(&chap->auth_work); + ret = chap->error; + nvme_auth_reset_dhchap(chap); if (ret) dev_warn(ctrl->device, "qid %d: authentication failed\n", q); @@ -967,6 +1094,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) chap = &ctrl->dhchap_ctxs[i]; chap->qid = i; chap->ctrl = ctrl; + chap->authenticated = false; INIT_WORK(&chap->auth_work, nvme_queue_auth_work); } @@ -994,7 +1122,7 @@ void nvme_auth_free(struct nvme_ctrl *ctrl) if (ctrl->dhchap_ctxs) { for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]); - kfree(ctrl->dhchap_ctxs); + kvfree(ctrl->dhchap_ctxs); } if (ctrl->host_key) { nvme_auth_free_key(ctrl->host_key); diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c index 6f2ebb5fcdb0..dc90df9e13a2 100644 --- a/drivers/nvme/host/constants.c +++ b/drivers/nvme/host/constants.c @@ -133,7 +133,7 @@ static const char * const nvme_statuses[] = { [NVME_SC_NS_NOT_ATTACHED] = "Namespace Not Attached", [NVME_SC_THIN_PROV_NOT_SUPP] = "Thin Provisioning Not Supported", [NVME_SC_CTRL_LIST_INVALID] = "Controller List Invalid", - [NVME_SC_SELT_TEST_IN_PROGRESS] = "Device Self-test In Progress", + [NVME_SC_SELF_TEST_IN_PROGRESS] = "Device Self-test In Progress", [NVME_SC_BP_WRITE_PROHIBITED] = "Boot Partition Write Prohibited", [NVME_SC_CTRL_ID_INVALID] = "Invalid Controller Identifier", [NVME_SC_SEC_CTRL_STATE_INVALID] = "Invalid Secondary Controller State", @@ -145,7 +145,7 @@ static const char * const nvme_statuses[] = { [NVME_SC_BAD_ATTRIBUTES] = "Conflicting Attributes", [NVME_SC_INVALID_PI] = "Invalid Protection Information", [NVME_SC_READ_ONLY] = "Attempted Write to Read Only Range", - [NVME_SC_ONCS_NOT_SUPPORTED] = "ONCS Not Supported", + [NVME_SC_CMD_SIZE_LIM_EXCEEDED] = "Command Size Limits Exceeded", [NVME_SC_ZONE_BOUNDARY_ERROR] = "Zoned Boundary Error", [NVME_SC_ZONE_FULL] = "Zone Is Full", [NVME_SC_ZONE_READ_ONLY] = "Zone Is Read Only", @@ -173,7 +173,7 @@ static const char * const nvme_statuses[] = { const char 
*nvme_get_error_status_str(u16 status) { - status &= 0x7ff; + status &= NVME_SCT_SC_MASK; if (status < ARRAY_SIZE(nvme_statuses) && nvme_statuses[status]) return nvme_statuses[status]; return "Unknown"; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 782090ce0bc1..7bf228df6001 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4,6 +4,7 @@ * Copyright (c) 2011-2014, Intel Corporation. */ +#include <linux/async.h> #include <linux/blkdev.h> #include <linux/blk-mq.h> #include <linux/blk-integrity.h> @@ -21,7 +22,7 @@ #include <linux/nvme_ioctl.h> #include <linux/pm_qos.h> #include <linux/ratelimit.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "nvme.h" #include "fabrics.h" @@ -36,10 +37,15 @@ struct nvme_ns_info { struct nvme_ns_ids ids; u32 nsid; __le32 anagrpid; + u8 pi_offset; + u16 endgid; + u64 runs; bool is_shared; bool is_readonly; bool is_ready; bool is_removed; + bool is_rotational; + bool no_vwc; }; unsigned int admin_timeout = 60; @@ -90,6 +96,17 @@ MODULE_PARM_DESC(apst_secondary_latency_tol_us, "secondary APST latency tolerance in us"); /* + * Older kernels didn't enable protection information if it was at an offset. + * Newer kernels do, so it breaks reads on the upgrade if such formats were + * used in prior kernels since the metadata written did not contain a valid + * checksum. + */ +static bool disable_pi_offsets = false; +module_param(disable_pi_offsets, bool, 0444); +MODULE_PARM_DESC(disable_pi_offsets, + "disable protection information if it has an offset"); + +/* * nvme_wq - hosts nvme related works that are not reset or delete * nvme_reset_wq - hosts nvme reset works * nvme_delete_wq - hosts nvme delete works @@ -110,7 +127,7 @@ struct workqueue_struct *nvme_delete_wq; EXPORT_SYMBOL_GPL(nvme_delete_wq); static LIST_HEAD(nvme_subsystems); -static DEFINE_MUTEX(nvme_subsystems_lock); +DEFINE_MUTEX(nvme_subsystems_lock); static DEFINE_IDA(nvme_instance_ida); static dev_t nvme_ctrl_base_chr_devt; @@ -135,6 +152,8 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid); static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, struct nvme_command *cmd); +static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, + u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi); void nvme_queue_scan(struct nvme_ctrl *ctrl) { @@ -261,7 +280,7 @@ void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl) static blk_status_t nvme_error_status(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return BLK_STS_OK; case NVME_SC_CAP_EXCEEDED: @@ -271,7 +290,6 @@ static blk_status_t nvme_error_status(u16 status) case NVME_SC_NS_NOT_READY: return BLK_STS_TARGET; case NVME_SC_BAD_ATTRIBUTES: - case NVME_SC_ONCS_NOT_SUPPORTED: case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: case NVME_SC_INVALID_NS: @@ -307,7 +325,7 @@ static void nvme_retry_req(struct request *req) u16 crd; /* The mask and shift result must be <= 3 */ - crd = (nvme_req(req)->status & NVME_SC_CRD) >> 11; + crd = (nvme_req(req)->status & NVME_STATUS_CRD) >> 11; if (crd) delay = nvme_req(req)->ctrl->crdt[crd - 1] * 100; @@ -329,10 +347,10 @@ static void nvme_log_error(struct request *req) nvme_sect_to_lba(ns->head, blk_rq_pos(req)), blk_rq_bytes(req) >> ns->head->lba_shift, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? 
"MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); return; } @@ -341,10 +359,10 @@ static void nvme_log_error(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : ""); + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : ""); } static void nvme_log_err_passthru(struct request *req) @@ -359,16 +377,16 @@ static void nvme_log_err_passthru(struct request *req) nvme_get_admin_opcode_str(nr->cmd->common.opcode), nr->cmd->common.opcode, nvme_get_error_status_str(nr->status), - nr->status >> 8 & 7, /* Status Code Type */ - nr->status & 0xff, /* Status Code */ - nr->status & NVME_SC_MORE ? "MORE " : "", - nr->status & NVME_SC_DNR ? "DNR " : "", - nr->cmd->common.cdw10, - nr->cmd->common.cdw11, - nr->cmd->common.cdw12, - nr->cmd->common.cdw13, - nr->cmd->common.cdw14, - nr->cmd->common.cdw14); + NVME_SCT(nr->status), /* Status Code Type */ + nr->status & NVME_SC_MASK, /* Status Code */ + nr->status & NVME_STATUS_MORE ? "MORE " : "", + nr->status & NVME_STATUS_DNR ? "DNR " : "", + le32_to_cpu(nr->cmd->common.cdw10), + le32_to_cpu(nr->cmd->common.cdw11), + le32_to_cpu(nr->cmd->common.cdw12), + le32_to_cpu(nr->cmd->common.cdw13), + le32_to_cpu(nr->cmd->common.cdw14), + le32_to_cpu(nr->cmd->common.cdw15)); } enum nvme_disposition { @@ -384,11 +402,11 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req) return COMPLETE; if (blk_noretry_request(req) || - (nvme_req(req)->status & NVME_SC_DNR) || + (nvme_req(req)->status & NVME_STATUS_DNR) || nvme_req(req)->retries >= nvme_max_retries) return COMPLETE; - if ((nvme_req(req)->status & 0x7ff) == NVME_SC_AUTH_REQUIRED) + if ((nvme_req(req)->status & NVME_SCT_SC_MASK) == NVME_SC_AUTH_REQUIRED) return AUTHENTICATE; if (req->cmd_flags & REQ_NVME_MPATH) { @@ -416,6 +434,12 @@ static inline void nvme_end_req_zoned(struct request *req) static inline void __nvme_end_req(struct request *req) { + if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { + if (blk_rq_is_passthrough(req)) + nvme_log_err_passthru(req); + else + nvme_log_error(req); + } nvme_end_req_zoned(req); nvme_trace_bio_complete(req); if (req->cmd_flags & REQ_NVME_MPATH) @@ -426,12 +450,6 @@ void nvme_end_req(struct request *req) { blk_status_t status = nvme_error_status(nvme_req(req)->status); - if (unlikely(nvme_req(req)->status && !(req->rq_flags & RQF_QUIET))) { - if (blk_rq_is_passthrough(req)) - nvme_log_err_passthru(req); - else - nvme_log_error(req); - } __nvme_end_req(req); blk_mq_end_request(req, status); } @@ -549,8 +567,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (new_state) { case NVME_CTRL_LIVE: switch (old_state) { - case NVME_CTRL_NEW: - case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; fallthrough; @@ -651,10 +667,11 @@ static void nvme_free_ns_head(struct kref *ref) struct nvme_ns_head *head = container_of(ref, struct nvme_ns_head, ref); - nvme_mpath_remove_disk(head); + nvme_mpath_put_disk(head); ida_free(&head->subsys->ns_ida, 
head->instance); cleanup_srcu_struct(&head->srcu); nvme_put_subsystem(head->subsys); + kfree(head->plids); kfree(head); } @@ -687,7 +704,7 @@ void nvme_put_ns(struct nvme_ns *ns) { kref_put(&ns->kref, nvme_free_ns); } -EXPORT_SYMBOL_NS_GPL(nvme_put_ns, NVME_TARGET_PASSTHRU); +EXPORT_SYMBOL_NS_GPL(nvme_put_ns, "NVME_TARGET_PASSTHRU"); static inline void nvme_clear_nvme_request(struct request *req) { @@ -747,6 +764,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; + + if (!(rq->rq_flags & RQF_DONTPREP)) + nvme_clear_nvme_request(rq); + return nvme_host_path_error(rq); } EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); @@ -870,12 +891,27 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, return BLK_STS_OK; } +static void nvme_set_app_tag(struct request *req, struct nvme_command *cmnd) +{ + cmnd->rw.lbat = cpu_to_le16(bio_integrity(req->bio)->app_tag); + cmnd->rw.lbatm = cpu_to_le16(0xffff); +} + static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd, struct request *req) { u32 upper, lower; u64 ref48; + /* only type1 and type 2 PI formats have a reftag */ + switch (ns->head->pi_type) { + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + break; + default: + return; + } + /* both rw and write zeroes share the same reftag format */ switch (ns->head->guard_type) { case NVME_NVM_NS_16B_GUARD: @@ -915,18 +951,42 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns, if (nvme_ns_has_pi(ns->head)) { cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT); - - switch (ns->head->pi_type) { - case NVME_NS_DPS_PI_TYPE1: - case NVME_NS_DPS_PI_TYPE2: - nvme_set_ref_tag(ns, cmnd, req); - break; - } + nvme_set_ref_tag(ns, cmnd, req); } return BLK_STS_OK; } +/* + * NVMe does not support a dedicated command to issue an atomic write. A write + * which does adhere to the device atomic limits will silently be executed + * non-atomically. The request issuer should ensure that the write is within + * the queue atomic writes limits, but just validate this in case it is not. 
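As a worked example of the boundary test below, assume the device reports a 64 KiB atomic write boundary:

/*
 * boundary_bytes = 0x10000, mask = 0xffff, imask = ~0xffff
 * start = 0x1f000 (124 KiB), blk_rq_bytes() = 0x2000 (8 KiB) -> end = 0x20fff
 * start & imask = 0x10000, end & imask = 0x20000
 * The masked values differ, so the write straddles a boundary and
 * nvme_valid_atomic_write() returns false; the same request confined to
 * the 0x10000-0x1ffff window would pass the boundary check.
 */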
+ */ +static bool nvme_valid_atomic_write(struct request *req) +{ + struct request_queue *q = req->q; + u32 boundary_bytes = queue_atomic_write_boundary_bytes(q); + + if (blk_rq_bytes(req) > queue_atomic_write_unit_max_bytes(q)) + return false; + + if (boundary_bytes) { + u64 mask = boundary_bytes - 1, imask = ~mask; + u64 start = blk_rq_pos(req) << SECTOR_SHIFT; + u64 end = start + blk_rq_bytes(req) - 1; + + /* If greater then must be crossing a boundary */ + if (blk_rq_bytes(req) > boundary_bytes) + return false; + + if ((start & imask) != (end & imask)) + return false; + } + + return true; +} + static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, struct request *req, struct nvme_command *cmnd, enum nvme_opcode op) @@ -942,6 +1002,21 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (req->cmd_flags & REQ_RAHEAD) dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH; + if (op == nvme_cmd_write && ns->head->nr_plids) { + u16 write_stream = req->bio->bi_write_stream; + + if (WARN_ON_ONCE(write_stream > ns->head->nr_plids)) + return BLK_STS_INVAL; + + if (write_stream) { + dsmgmt |= ns->head->plids[write_stream - 1] << 16; + control |= NVME_RW_DTYPE_DPLCMT; + } + } + + if (req->cmd_flags & REQ_ATOMIC && !nvme_valid_atomic_write(req)) + return BLK_STS_INVAL; + cmnd->rw.opcode = op; cmnd->rw.flags = 0; cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id); @@ -953,12 +1028,12 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1); cmnd->rw.reftag = 0; - cmnd->rw.apptag = 0; - cmnd->rw.appmask = 0; + cmnd->rw.lbat = 0; + cmnd->rw.lbatm = 0; if (ns->head->ms) { /* - * If formated with metadata, the block layer always provides a + * If formatted with metadata, the block layer always provides a * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled. Else * we enable the PRACT bit for protection information or set the * namespace capacity to zero to prevent any I/O. @@ -967,20 +1042,20 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head))) return BLK_STS_NOTSUPP; control |= NVME_RW_PRINFO_PRACT; + nvme_set_ref_tag(ns, cmnd, req); } - switch (ns->head->pi_type) { - case NVME_NS_DPS_PI_TYPE3: + if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD)) control |= NVME_RW_PRINFO_PRCHK_GUARD; - break; - case NVME_NS_DPS_PI_TYPE1: - case NVME_NS_DPS_PI_TYPE2: - control |= NVME_RW_PRINFO_PRCHK_GUARD | - NVME_RW_PRINFO_PRCHK_REF; + if (bio_integrity_flagged(req->bio, BIP_CHECK_REFTAG)) { + control |= NVME_RW_PRINFO_PRCHK_REF; if (op == nvme_cmd_zone_append) control |= NVME_RW_APPEND_PIREMAP; nvme_set_ref_tag(ns, cmnd, req); - break; + } + if (bio_integrity_flagged(req->bio, BIP_CHECK_APPTAG)) { + control |= NVME_RW_PRINFO_PRCHK_APP; + nvme_set_app_tag(req, cmnd); } } @@ -1075,7 +1150,7 @@ int nvme_execute_rq(struct request *rq, bool at_head) return nvme_req(rq)->status; return blk_status_to_errno(status); } -EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU); +EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, "NVME_TARGET_PASSTHRU"); /* * Returns 0 on success. 
If the result is negative, it's a Linux error code; @@ -1106,7 +1181,7 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, req->cmd_flags &= ~REQ_FAILFAST_DRIVER; if (buffer && bufflen) { - ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL); + ret = blk_rq_map_kern(req, buffer, bufflen, GFP_KERNEL); if (ret) goto out; } @@ -1155,7 +1230,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) return effects; } -EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU); +EXPORT_SYMBOL_NS_GPL(nvme_command_effects, "NVME_TARGET_PASSTHRU"); u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) { @@ -1175,7 +1250,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode) } return effects; } -EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU); +EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, "NVME_TARGET_PASSTHRU"); void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, struct nvme_command *cmd, int status) @@ -1220,11 +1295,11 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects, break; } } -EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU); +EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, "NVME_TARGET_PASSTHRU"); /* * Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1: - * + * * The host should send Keep Alive commands at half of the Keep Alive Timeout * accounting for transport roundtrip times [..]. */ @@ -1261,10 +1336,9 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, blk_status_t status) { struct nvme_ctrl *ctrl = rq->end_io_data; - unsigned long flags; - bool startka = false; unsigned long rtt = jiffies - (rq->deadline - rq->timeout); unsigned long delay = nvme_keep_alive_work_period(ctrl); + enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); /* * Subtract off the keepalive RTT so nvme_keep_alive_work runs @@ -1289,12 +1363,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq, ctrl->ka_last_check_time = jiffies; ctrl->comp_seen = false; - spin_lock_irqsave(&ctrl->lock, flags); - if (ctrl->state == NVME_CTRL_LIVE || - ctrl->state == NVME_CTRL_CONNECTING) - startka = true; - spin_unlock_irqrestore(&ctrl->lock, flags); - if (startka) + if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING) queue_delayed_work(nvme_wq, &ctrl->ka_work, delay); return RQ_END_IO_NONE; } @@ -1364,17 +1433,30 @@ static void nvme_update_keep_alive(struct nvme_ctrl *ctrl, nvme_start_keep_alive(ctrl); } -/* - * In NVMe 1.0 the CNS field was just a binary controller or namespace - * flag, thus sending any new CNS opcodes has a big chance of not working. - * Qemu unfortunately had that bug after reporting a 1.1 version compliance - * (but not for any later version). - */ -static bool nvme_ctrl_limited_cns(struct nvme_ctrl *ctrl) +static bool nvme_id_cns_ok(struct nvme_ctrl *ctrl, u8 cns) { - if (ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS) - return ctrl->vs < NVME_VS(1, 2, 0); - return ctrl->vs < NVME_VS(1, 1, 0); + /* + * The CNS field occupies a full byte starting with NVMe 1.2 + */ + if (ctrl->vs >= NVME_VS(1, 2, 0)) + return true; + + /* + * NVMe 1.1 expanded the CNS value to two bits, which means values + * larger than that could get truncated and treated as an incorrect + * value. + * + * Qemu implemented 1.0 behavior for controllers claiming 1.1 + * compliance, so they need to be quirked here. 
+ */ + if (ctrl->vs >= NVME_VS(1, 1, 0) && + !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) + return cns <= 3; + + /* + * NVMe 1.0 used a single bit for the CNS value. + */ + return cns <= 1; } static int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id) @@ -1551,6 +1633,7 @@ static int nvme_ns_info_from_identify(struct nvme_ctrl *ctrl, info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; info->is_ready = true; + info->endgid = le16_to_cpu(id->endgid); if (ctrl->quirks & NVME_QUIRK_BOGUS_NID) { dev_info(ctrl->device, "Ignoring bogus Namespace Identifiers\n"); @@ -1589,6 +1672,9 @@ static int nvme_ns_info_from_id_cs_indep(struct nvme_ctrl *ctrl, info->is_shared = id->nmic & NVME_NS_NMIC_SHARED; info->is_readonly = id->nsattr & NVME_NS_ATTR_RO; info->is_ready = id->nstat & NVME_NSTAT_NRDY; + info->is_rotational = id->nsfeat & NVME_NS_ROTATIONAL; + info->no_vwc = id->nsfeat & NVME_NS_VWC_NOT_PRESENT; + info->endgid = le16_to_cpu(id->endgid); } kfree(id); return ret; @@ -1614,7 +1700,7 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid, int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result) + void *result) { return nvme_features(dev, nvme_admin_set_features, fid, dword11, buffer, buflen, result); @@ -1623,7 +1709,7 @@ EXPORT_SYMBOL_GPL(nvme_set_features); int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result) + void *result) { return nvme_features(dev, nvme_admin_get_features, fid, dword11, buffer, buflen, result); @@ -1638,7 +1724,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, &result); - if (status < 0) + + /* + * It's either a kernel error or the host observed a connection + * lost. In either case it's not possible communicate with the + * controller and thus enter the error code path. 
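Taken together, the nvme_id_cns_ok() check above reduces to a three-row table keyed on the controller version. The fragment below restates it outside the driver purely as an illustration; it only assumes that NVME_VS(major, minor, tertiary) packs the version register with the major number in the high 16 bits, then minor, then tertiary.

#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

/* CNS width: full byte from 1.2, two bits in 1.1 (unless quirked), one bit in 1.0. */
static bool cns_fits(unsigned int vs, bool identify_cns_quirk, unsigned char cns)
{
	if (vs >= NVME_VS(1, 2, 0))
		return true;
	if (vs >= NVME_VS(1, 1, 0) && !identify_cns_quirk)
		return cns <= 3;
	return cns <= 1;
}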
+ */ + if (status < 0 || status == NVME_SC_HOST_PATH_ERROR) return status; /* @@ -1715,20 +1807,21 @@ static void nvme_release(struct gendisk *disk) nvme_ns_release(disk->private_data); } -int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo) +int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo) { /* some standard values */ geo->heads = 1 << 6; geo->sectors = 1 << 5; - geo->cylinders = get_capacity(bdev->bd_disk) >> 11; + geo->cylinders = get_capacity(disk) >> 11; return 0; } -static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) +static bool nvme_init_integrity(struct nvme_ns_head *head, + struct queue_limits *lim, struct nvme_ns_info *info) { - struct blk_integrity integrity = { }; + struct blk_integrity *bi = &lim->integrity; - blk_integrity_unregister(disk); + memset(bi, 0, sizeof(*bi)); if (!head->ms) return true; @@ -1745,17 +1838,16 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE3: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type3_crc; - integrity.tag_size = sizeof(u16) + sizeof(u32); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16) + sizeof(u32); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type3_crc64; - integrity.tag_size = sizeof(u16) + 6; - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16) + 6; + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE; break; default: - integrity.profile = NULL; break; } break; @@ -1763,28 +1855,30 @@ static bool nvme_init_integrity(struct gendisk *disk, struct nvme_ns_head *head) case NVME_NS_DPS_PI_TYPE2: switch (head->guard_type) { case NVME_NVM_NS_16B_GUARD: - integrity.profile = &t10_pi_type1_crc; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; case NVME_NVM_NS_64B_GUARD: - integrity.profile = &ext_pi_type1_crc64; - integrity.tag_size = sizeof(u16); - integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE; + bi->csum_type = BLK_INTEGRITY_CSUM_CRC64; + bi->tag_size = sizeof(u16); + bi->flags |= BLK_INTEGRITY_DEVICE_CAPABLE | + BLK_INTEGRITY_REF_TAG; break; default: - integrity.profile = NULL; break; } break; default: - integrity.profile = NULL; break; } - integrity.tuple_size = head->ms; - integrity.pi_offset = head->pi_offset; - blk_integrity_register(disk, &integrity); + bi->metadata_size = head->ms; + if (bi->csum_type) { + bi->pi_tuple_size = head->pi_size; + bi->pi_offset = info->pi_offset; + } return true; } @@ -1844,12 +1938,18 @@ static void nvme_configure_pi_elbas(struct nvme_ns_head *head, struct nvme_id_ns *id, struct nvme_id_ns_nvm *nvm) { u32 elbaf = le32_to_cpu(nvm->elbaf[nvme_lbaf_index(id->flbas)]); + u8 guard_type; /* no support for storage tag formats right now */ if (nvme_elbaf_sts(elbaf)) return; - head->guard_type = nvme_elbaf_guard_type(elbaf); + guard_type = nvme_elbaf_guard_type(elbaf); + if ((nvm->pic & NVME_ID_NS_NVM_QPIFS) && + guard_type == NVME_NVM_NS_QTYPE_GUARD) + guard_type = nvme_elbaf_qualified_guard_type(elbaf); + + head->guard_type = guard_type; switch (head->guard_type) { case NVME_NVM_NS_64B_GUARD: head->pi_size = sizeof(struct crc64_pi_tuple); @@ -1864,12 +1964,11 @@ static void 
nvme_configure_pi_elbas(struct nvme_ns_head *head, static void nvme_configure_metadata(struct nvme_ctrl *ctrl, struct nvme_ns_head *head, struct nvme_id_ns *id, - struct nvme_id_ns_nvm *nvm) + struct nvme_id_ns_nvm *nvm, struct nvme_ns_info *info) { head->features &= ~(NVME_NS_METADATA_SUPPORTED | NVME_NS_EXT_LBAS); head->pi_type = 0; head->pi_size = 0; - head->pi_offset = 0; head->ms = le16_to_cpu(id->lbaf[nvme_lbaf_index(id->flbas)].ms); if (!head->ms || !(ctrl->ops->flags & NVME_F_METADATA_SUPPORTED)) return; @@ -1883,8 +1982,12 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl, if (head->pi_size && head->ms >= head->pi_size) head->pi_type = id->dps & NVME_NS_DPS_PI_MASK; - if (!(id->dps & NVME_NS_DPS_PI_FIRST)) - head->pi_offset = head->ms - head->pi_size; + if (!(id->dps & NVME_NS_DPS_PI_FIRST)) { + if (disable_pi_offsets) + head->pi_type = 0; + else + info->pi_offset = head->ms - head->pi_size; + } if (ctrl->ops->flags & NVME_F_FABRICS) { /* @@ -1922,19 +2025,57 @@ static void nvme_configure_metadata(struct nvme_ctrl *ctrl, } } + +static u32 nvme_configure_atomic_write(struct nvme_ns *ns, + struct nvme_id_ns *id, struct queue_limits *lim, u32 bs) +{ + u32 atomic_bs, boundary = 0; + + /* + * We do not support an offset for the atomic boundaries. + */ + if (id->nabo) + return bs; + + if ((id->nsfeat & NVME_NS_FEAT_ATOMICS) && id->nawupf) { + /* + * Use the per-namespace atomic write unit when available. + */ + atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; + if (id->nabspf) + boundary = (le16_to_cpu(id->nabspf) + 1) * bs; + } else { + /* + * Use the controller wide atomic write unit. This sucks + * because the limit is defined in terms of logical blocks while + * namespaces can have different formats, and because there is + * no clear language in the specification prohibiting different + * values for different controllers in the subsystem. + */ + atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; + } + + lim->atomic_write_hw_max = atomic_bs; + lim->atomic_write_hw_boundary = boundary; + lim->atomic_write_hw_unit_min = bs; + lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs); + lim->features |= BLK_FEAT_ATOMIC_WRITES; + return atomic_bs; +} + static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl) { return ctrl->max_hw_sectors / (NVME_CTRL_PAGE_SIZE >> SECTOR_SHIFT) + 1; } static void nvme_set_ctrl_limits(struct nvme_ctrl *ctrl, - struct queue_limits *lim) + struct queue_limits *lim, bool is_admin) { lim->max_hw_sectors = ctrl->max_hw_sectors; lim->max_segments = min_t(u32, USHRT_MAX, min_not_zero(nvme_max_drv_segments(ctrl), ctrl->max_segments)); lim->max_integrity_segments = ctrl->max_integrity_segments; - lim->virt_boundary_mask = NVME_CTRL_PAGE_SIZE - 1; + lim->virt_boundary_mask = ctrl->ops->get_virt_boundary(ctrl, is_admin); lim->max_segment_size = UINT_MAX; lim->dma_alignment = 3; } @@ -1952,29 +2093,20 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, * or smaller than a sector size yet, so catch this early and don't * allow block I/O. */ - if (head->lba_shift > PAGE_SHIFT || head->lba_shift < SECTOR_SHIFT) { + if (blk_validate_block_size(bs)) { bs = (1 << 9); valid = false; } - atomic_bs = phys_bs = bs; - if (id->nabo == 0) { - /* - * Bit 1 indicates whether NAWUPF is defined for this namespace - * and whether it should be used instead of AWUPF. If NAWUPF == - * 0 then AWUPF must be used instead. 
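As a feel for the numbers produced by nvme_configure_atomic_write() above: NAWUPF and NABSPF are 0's-based counts of logical blocks, so with a 4 KiB LBA format, nawupf = 7 and nabspf = 15 yield a 32 KiB atomic write unit on a 64 KiB boundary. A small user-space sketch of the same arithmetic, modelling only the per-namespace NAWUPF path (the AWUPF fallback differs only in where the count comes from); rounddown_p2() is a local stand-in for the kernel helper.

#include <stdint.h>

static uint64_t rounddown_p2(uint64_t x)
{
	while (x & (x - 1))
		x &= x - 1;		/* drop low bits until a single one remains */
	return x;
}

struct atomic_limits {
	uint32_t hw_max, hw_boundary, unit_min, unit_max;
};

/* bs is the logical block size; nawupf/nabspf are 0's based as in Identify NS. */
static struct atomic_limits atomic_limits_from_id(uint32_t bs, uint16_t nawupf,
						  uint16_t nabspf)
{
	uint32_t atomic_bs = (1 + nawupf) * bs;

	return (struct atomic_limits) {
		.hw_max      = atomic_bs,			/* 32768 in the example */
		.hw_boundary = nabspf ? (1 + nabspf) * bs : 0,	/* 65536 in the example */
		.unit_min    = bs,				/* one logical block */
		.unit_max    = rounddown_p2(atomic_bs),		/* largest power of two */
	};
}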
- */ - if (id->nsfeat & NVME_NS_FEAT_ATOMICS && id->nawupf) - atomic_bs = (1 + le16_to_cpu(id->nawupf)) * bs; - else - atomic_bs = (1 + ns->ctrl->subsys->awupf) * bs; - } + phys_bs = bs; + atomic_bs = nvme_configure_atomic_write(ns, id, lim, bs); if (id->nsfeat & NVME_NS_FEAT_IO_OPT) { /* NPWG = Namespace Preferred Write Granularity */ phys_bs = bs * (1 + le16_to_cpu(id->npwg)); /* NOWS = Namespace Optimal Write Size */ - io_opt = bs * (1 + le16_to_cpu(id->nows)); + if (id->nows) + io_opt = bs * (1 + le16_to_cpu(id->nows)); } /* @@ -1986,7 +2118,8 @@ static bool nvme_update_disk_info(struct nvme_ns *ns, struct nvme_id_ns *id, lim->physical_block_size = min(phys_bs, atomic_bs); lim->io_min = phys_bs; lim->io_opt = io_opt; - if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) + if ((ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) && + (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM)) lim->max_write_zeroes_sectors = UINT_MAX; else lim->max_write_zeroes_sectors = ns->ctrl->max_zeroes_sectors; @@ -2040,14 +2173,16 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns, struct nvme_ns_info *info) { struct queue_limits lim; + unsigned int memflags; int ret; - blk_mq_freeze_queue(ns->disk->queue); lim = queue_limits_start_update(ns->disk->queue); - nvme_set_ctrl_limits(ns->ctrl, &lim); + nvme_set_ctrl_limits(ns->ctrl, &lim, false); + + memflags = blk_mq_freeze_queue(ns->disk->queue); ret = queue_limits_commit_update(ns->disk->queue, &lim); set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); - blk_mq_unfreeze_queue(ns->disk->queue); + blk_mq_unfreeze_queue(ns->disk->queue, memflags); /* Hide the block-interface for these devices */ if (!ret) @@ -2055,14 +2190,156 @@ static int nvme_update_ns_info_generic(struct nvme_ns *ns, return ret; } +static int nvme_query_fdp_granularity(struct nvme_ctrl *ctrl, + struct nvme_ns_info *info, u8 fdp_idx) +{ + struct nvme_fdp_config_log hdr, *h; + struct nvme_fdp_config_desc *desc; + size_t size = sizeof(hdr); + void *log, *end; + int i, n, ret; + + ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0, + NVME_CSI_NVM, &hdr, size, 0, info->endgid); + if (ret) { + dev_warn(ctrl->device, + "FDP configs log header status:0x%x endgid:%d\n", ret, + info->endgid); + return ret; + } + + size = le32_to_cpu(hdr.sze); + if (size > PAGE_SIZE * MAX_ORDER_NR_PAGES) { + dev_warn(ctrl->device, "FDP config size too large:%zu\n", + size); + return 0; + } + + h = kvmalloc(size, GFP_KERNEL); + if (!h) + return -ENOMEM; + + ret = nvme_get_log_lsi(ctrl, 0, NVME_LOG_FDP_CONFIGS, 0, + NVME_CSI_NVM, h, size, 0, info->endgid); + if (ret) { + dev_warn(ctrl->device, + "FDP configs log status:0x%x endgid:%d\n", ret, + info->endgid); + goto out; + } + + n = le16_to_cpu(h->numfdpc) + 1; + if (fdp_idx > n) { + dev_warn(ctrl->device, "FDP index:%d out of range:%d\n", + fdp_idx, n); + /* Proceed without registering FDP streams */ + ret = 0; + goto out; + } + + log = h + 1; + desc = log; + end = log + size - sizeof(*h); + for (i = 0; i < fdp_idx; i++) { + log += le16_to_cpu(desc->dsze); + desc = log; + if (log >= end) { + dev_warn(ctrl->device, + "FDP invalid config descriptor list\n"); + ret = 0; + goto out; + } + } + + if (le32_to_cpu(desc->nrg) > 1) { + dev_warn(ctrl->device, "FDP NRG > 1 not supported\n"); + ret = 0; + goto out; + } + + info->runs = le64_to_cpu(desc->runs); +out: + kvfree(h); + return ret; +} + +static int nvme_query_fdp_info(struct nvme_ns *ns, struct nvme_ns_info *info) +{ + struct nvme_ns_head *head = ns->head; + struct nvme_ctrl *ctrl = ns->ctrl; + struct 
nvme_fdp_ruh_status *ruhs; + struct nvme_fdp_config fdp; + struct nvme_command c = {}; + size_t size; + int i, ret; + + /* + * The FDP configuration is static for the lifetime of the namespace, + * so return immediately if we've already registered this namespace's + * streams. + */ + if (head->nr_plids) + return 0; + + ret = nvme_get_features(ctrl, NVME_FEAT_FDP, info->endgid, NULL, 0, + &fdp); + if (ret) { + dev_warn(ctrl->device, "FDP get feature status:0x%x\n", ret); + return ret; + } + + if (!(fdp.flags & FDPCFG_FDPE)) + return 0; + + ret = nvme_query_fdp_granularity(ctrl, info, fdp.fdpcidx); + if (!info->runs) + return ret; + + size = struct_size(ruhs, ruhsd, S8_MAX - 1); + ruhs = kzalloc(size, GFP_KERNEL); + if (!ruhs) + return -ENOMEM; + + c.imr.opcode = nvme_cmd_io_mgmt_recv; + c.imr.nsid = cpu_to_le32(head->ns_id); + c.imr.mo = NVME_IO_MGMT_RECV_MO_RUHS; + c.imr.numd = cpu_to_le32(nvme_bytes_to_numd(size)); + ret = nvme_submit_sync_cmd(ns->queue, &c, ruhs, size); + if (ret) { + dev_warn(ctrl->device, "FDP io-mgmt status:0x%x\n", ret); + goto free; + } + + head->nr_plids = le16_to_cpu(ruhs->nruhsd); + if (!head->nr_plids) + goto free; + + head->plids = kcalloc(head->nr_plids, sizeof(*head->plids), + GFP_KERNEL); + if (!head->plids) { + dev_warn(ctrl->device, + "failed to allocate %u FDP placement IDs\n", + head->nr_plids); + head->nr_plids = 0; + ret = -ENOMEM; + goto free; + } + + for (i = 0; i < head->nr_plids; i++) + head->plids[i] = le16_to_cpu(ruhs->ruhsd[i].pid); +free: + kfree(ruhs); + return ret; +} + static int nvme_update_ns_info_block(struct nvme_ns *ns, struct nvme_ns_info *info) { - bool vwc = ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT; struct queue_limits lim; struct nvme_id_ns_nvm *nvm = NULL; struct nvme_zone_info zi = {}; struct nvme_id_ns *id; + unsigned int memflags; sector_t capacity; unsigned lbaf; int ret; @@ -2092,26 +2369,36 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, goto out; } - blk_mq_freeze_queue(ns->disk->queue); + if (ns->ctrl->ctratt & NVME_CTRL_ATTR_FDPS) { + ret = nvme_query_fdp_info(ns, info); + if (ret < 0) + goto out; + } + + lim = queue_limits_start_update(ns->disk->queue); + + memflags = blk_mq_freeze_queue(ns->disk->queue); ns->head->lba_shift = id->lbaf[lbaf].ds; ns->head->nuse = le64_to_cpu(id->nuse); capacity = nvme_lba_to_sect(ns->head, le64_to_cpu(id->nsze)); - - lim = queue_limits_start_update(ns->disk->queue); - nvme_set_ctrl_limits(ns->ctrl, &lim); - nvme_configure_metadata(ns->ctrl, ns->head, id, nvm); + nvme_set_ctrl_limits(ns->ctrl, &lim, false); + nvme_configure_metadata(ns->ctrl, ns->head, id, nvm, info); nvme_set_chunk_sectors(ns, id, &lim); if (!nvme_update_disk_info(ns, id, &lim)) capacity = 0; + nvme_config_discard(ns, &lim); if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && ns->head->ids.csi == NVME_CSI_ZNS) nvme_update_zone_info(ns, &lim, &zi); - ret = queue_limits_commit_update(ns->disk->queue, &lim); - if (ret) { - blk_mq_unfreeze_queue(ns->disk->queue); - goto out; - } + + if ((ns->ctrl->vwc & NVME_CTRL_VWC_PRESENT) && !info->no_vwc) + lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA; + else + lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA); + + if (info->is_rotational) + lim.features |= BLK_FEAT_ROTATIONAL; /* * Register a metadata profile for PI, or the plain non-integrity NVMe @@ -2119,10 +2406,14 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, * I/O to namespaces with metadata except when the namespace supports * PI, as it can strip/insert in that case. 
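Looking back at the FDP plumbing above: nvme_query_fdp_info() caches the reclaim unit handle placement IDs in head->plids, and the nvme_setup_rw() hunk earlier in this diff folds a bio's write-stream index into the command from that table. The sketch below isolates just that translation; the struct and the directive-type constant value are assumptions for illustration, not the driver's definitions.

#include <stdint.h>
#include <stdbool.h>

/* Directive type 2 (data placement) lives in CDW12 bits 23:20; value assumed. */
#define RW_DTYPE_DPLCMT		(2 << 4)

struct ns_streams {
	uint16_t nr_plids;
	uint16_t plids[128];		/* placement IDs from I/O Mgmt Receive (RUHS) */
};

/* write_stream is 1-based; 0 means the bio carries no stream hint. */
static bool stream_to_rw_dwords(const struct ns_streams *s, uint16_t write_stream,
				uint32_t *dsmgmt, uint16_t *control)
{
	if (write_stream > s->nr_plids)
		return false;			/* stream the device never advertised */
	if (write_stream) {
		*dsmgmt |= (uint32_t)s->plids[write_stream - 1] << 16;	/* DSPEC, CDW13 31:16 */
		*control |= RW_DTYPE_DPLCMT;
	}
	return true;
}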
*/ - if (!nvme_init_integrity(ns->disk, ns->head)) + if (!nvme_init_integrity(ns->head, &lim, info)) capacity = 0; - set_capacity_and_notify(ns->disk, capacity); + lim.max_write_streams = ns->head->nr_plids; + if (lim.max_write_streams) + lim.write_stream_granularity = min(info->runs, U32_MAX); + else + lim.write_stream_granularity = 0; /* * Only set the DEAC bit if the device guarantees that reads from @@ -2130,12 +2421,21 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, * require that, it must be a no-op if reads from deallocated data * do not return zeroes. */ - if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) + if ((id->dlfeat & 0x7) == 0x1 && (id->dlfeat & (1 << 3))) { ns->head->features |= NVME_NS_DEAC; + lim.max_hw_wzeroes_unmap_sectors = lim.max_write_zeroes_sectors; + } + + ret = queue_limits_commit_update(ns->disk->queue, &lim); + if (ret) { + blk_mq_unfreeze_queue(ns->disk->queue, memflags); + goto out; + } + + set_capacity_and_notify(ns->disk, capacity); set_disk_ro(ns->disk, nvme_ns_is_readonly(ns, info)); - blk_queue_write_cache(ns->disk->queue, vwc, vwc); set_bit(NVME_NS_READY, &ns->flags); - blk_mq_unfreeze_queue(ns->disk->queue); + blk_mq_unfreeze_queue(ns->disk->queue, memflags); if (blk_queue_is_zoned(ns->queue)) { ret = blk_revalidate_disk_zones(ns->disk); @@ -2191,16 +2491,10 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) if (!ret && nvme_ns_head_multipath(ns->head)) { struct queue_limits *ns_lim = &ns->disk->queue->limits; struct queue_limits lim; + unsigned int memflags; - blk_mq_freeze_queue(ns->head->disk->queue); - if (unsupported) - ns->head->disk->flags |= GENHD_FL_HIDDEN; - else - nvme_init_integrity(ns->head->disk, ns->head); - set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); - set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); - nvme_mpath_revalidate_paths(ns); - + lim = queue_limits_start_update(ns->head->disk->queue); + memflags = blk_mq_freeze_queue(ns->head->disk->queue); /* * queue_limits mixes values that are the hardware limitations * for bio splitting with what is the device configuration. @@ -2216,20 +2510,56 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_ns_info *info) * the splitting limits in to make sure we still obey possibly * lower limitations of other controllers. 
*/ - lim = queue_limits_start_update(ns->head->disk->queue); lim.logical_block_size = ns_lim->logical_block_size; lim.physical_block_size = ns_lim->physical_block_size; lim.io_min = ns_lim->io_min; lim.io_opt = ns_lim->io_opt; queue_limits_stack_bdev(&lim, ns->disk->part0, 0, ns->head->disk->disk_name); + if (unsupported) + ns->head->disk->flags |= GENHD_FL_HIDDEN; + else + nvme_init_integrity(ns->head, &lim, info); + lim.max_write_streams = ns_lim->max_write_streams; + lim.write_stream_granularity = ns_lim->write_stream_granularity; ret = queue_limits_commit_update(ns->head->disk->queue, &lim); - blk_mq_unfreeze_queue(ns->head->disk->queue); + + set_capacity_and_notify(ns->head->disk, get_capacity(ns->disk)); + set_disk_ro(ns->head->disk, nvme_ns_is_readonly(ns, info)); + nvme_mpath_revalidate_paths(ns); + + blk_mq_unfreeze_queue(ns->head->disk->queue, memflags); } return ret; } +int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], + enum blk_unique_id type) +{ + struct nvme_ns_ids *ids = &ns->head->ids; + + if (type != BLK_UID_EUI64) + return -EINVAL; + + if (memchr_inv(ids->nguid, 0, sizeof(ids->nguid))) { + memcpy(id, &ids->nguid, sizeof(ids->nguid)); + return sizeof(ids->nguid); + } + if (memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) { + memcpy(id, &ids->eui64, sizeof(ids->eui64)); + return sizeof(ids->eui64); + } + + return -EINVAL; +} + +static int nvme_get_unique_id(struct gendisk *disk, u8 id[16], + enum blk_unique_id type) +{ + return nvme_ns_get_unique_id(disk->private_data, id, type); +} + #ifdef CONFIG_BLK_SED_OPAL static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, bool send) @@ -2269,10 +2599,9 @@ static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended) #ifdef CONFIG_BLK_DEV_ZONED static int nvme_report_zones(struct gendisk *disk, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data) + unsigned int nr_zones, struct blk_report_zones_args *args) { - return nvme_ns_report_zones(disk->private_data, sector, nr_zones, cb, - data); + return nvme_ns_report_zones(disk->private_data, sector, nr_zones, args); } #else #define nvme_report_zones NULL @@ -2285,6 +2614,7 @@ const struct block_device_operations nvme_bdev_ops = { .open = nvme_open, .release = nvme_release, .getgeo = nvme_getgeo, + .get_unique_id = nvme_get_unique_id, .report_zones = nvme_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -2367,8 +2697,13 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) else ctrl->ctrl_config = NVME_CC_CSS_NVM; - if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) - ctrl->ctrl_config |= NVME_CC_CRIME; + /* + * Setting CRIME results in CSTS.RDY before the media is ready. This + * makes it possible for media related commands to return the error + * NVME_SC_ADMIN_COMMAND_MEDIA_NOT_READY. Until the driver is + * restructured to handle retries, disable CC.CRIME. + */ + ctrl->ctrl_config &= ~NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; @@ -2377,11 +2712,6 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) if (ret) return ret; - /* Flush write to device (required if transport is PCI) */ - ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CC, &ctrl->ctrl_config); - if (ret) - return ret; - /* CAP value may change after initial CC write */ ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); if (ret) @@ -2403,10 +2733,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) * devices are known to get this wrong. 
Use the larger of the * two values. */ - if (ctrl->ctrl_config & NVME_CC_CRIME) - ready_timeout = NVME_CRTO_CRIMT(crto); - else - ready_timeout = NVME_CRTO_CRWMT(crto); + ready_timeout = NVME_CRTO_CRWMT(crto); if (ready_timeout < timeout) dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", @@ -2834,6 +3161,16 @@ static inline bool nvme_discovery_ctrl(struct nvme_ctrl *ctrl) return ctrl->opts && ctrl->opts->discovery_nqn; } +static inline bool nvme_admin_ctrl(struct nvme_ctrl *ctrl) +{ + return ctrl->cntrltype == NVME_CTRL_ADMIN; +} + +static inline bool nvme_is_io_ctrl(struct nvme_ctrl *ctrl) +{ + return !nvme_discovery_ctrl(ctrl) && !nvme_admin_ctrl(ctrl); +} + static bool nvme_validate_cntlid(struct nvme_subsystem *subsys, struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) { @@ -2884,6 +3221,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) memcpy(subsys->model, id->mn, sizeof(subsys->model)); subsys->vendor_id = le16_to_cpu(id->vid); subsys->cmic = id->cmic; + subsys->awupf = le16_to_cpu(id->awupf); /* Versions prior to 1.4 don't necessarily report a valid type */ if (id->cntrltype == NVME_CTRL_DISC || @@ -2899,7 +3237,6 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) kfree(subsys); return -EINVAL; } - subsys->awupf = le16_to_cpu(id->awupf); nvme_mpath_default_iopolicy(subsys); subsys->dev.class = &nvme_subsys_class; @@ -2952,8 +3289,8 @@ out_unlock: return ret; } -int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, - void *log, size_t size, u64 offset) +static int nvme_get_log_lsi(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, + u8 lsp, u8 csi, void *log, size_t size, u64 offset, u16 lsi) { struct nvme_command c = { }; u32 dwlen = nvme_bytes_to_numd(size); @@ -2967,14 +3304,22 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); c.get_log_page.lpou = cpu_to_le32(upper_32_bits(offset)); c.get_log_page.csi = csi; + c.get_log_page.lsi = cpu_to_le16(lsi); return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); } +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, + void *log, size_t size, u64 offset) +{ + return nvme_get_log_lsi(ctrl, nsid, log_page, lsp, csi, log, size, + offset, 0); +} + static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, struct nvme_effects_log **log) { - struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); + struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi); int ret; if (cel) @@ -2991,7 +3336,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, return ret; } - xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); + old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); + if (xa_is_err(old)) { + kfree(cel); + return xa_err(old); + } out: *log = cel; return 0; @@ -3024,8 +3373,8 @@ static int nvme_init_non_mdts_limits(struct nvme_ctrl *ctrl) else ctrl->max_zeroes_sectors = 0; - if (ctrl->subsys->subtype != NVME_NQN_NVME || - nvme_ctrl_limited_cns(ctrl) || + if (!nvme_is_io_ctrl(ctrl) || + !nvme_id_cns_ok(ctrl, NVME_ID_CNS_CS_CTRL) || test_bit(NVME_CTRL_SKIP_ID_CNS_CS, &ctrl->flags)) return 0; @@ -3053,6 +3402,25 @@ free_data: return ret; } +static int nvme_init_effects_log(struct nvme_ctrl *ctrl, + u8 csi, struct nvme_effects_log **log) +{ + struct nvme_effects_log *effects, *old; + + effects = kzalloc(sizeof(*effects), GFP_KERNEL); + if (!effects) + return -ENOMEM; + + old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL); 
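	/*
	 * xa_store() hands back whatever entry previously occupied the index,
	 * or an xa_err()-encoded pointer if the store itself failed (for
	 * example on allocation failure). The xa_is_err() check that follows
	 * frees the log that never got published, so the cels cache cannot
	 * leak on the error path; the nvme_get_effects_log() hunk above was
	 * converted to the same pattern.
	 */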
+ if (xa_is_err(old)) { + kfree(effects); + return xa_err(old); + } + + *log = effects; + return 0; +} + static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl) { struct nvme_effects_log *log = ctrl->effects; @@ -3099,10 +3467,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) } if (!ctrl->effects) { - ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); - if (!ctrl->effects) - return -ENOMEM; - xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); + ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); + if (ret < 0) + return ret; } nvme_init_known_nvm_effects(ctrl); @@ -3128,14 +3495,14 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct return -EINVAL; } - if (!nvme_discovery_ctrl(ctrl) && ctrl->ioccsz < 4) { + if (nvme_is_io_ctrl(ctrl) && ctrl->ioccsz < 4) { dev_err(ctrl->device, "I/O queue command capsule supported size %d < 4\n", ctrl->ioccsz); return -EINVAL; } - if (!nvme_discovery_ctrl(ctrl) && ctrl->iorcsz < 1) { + if (nvme_is_io_ctrl(ctrl) && ctrl->iorcsz < 1) { dev_err(ctrl->device, "I/O queue response capsule supported size %d < 1\n", ctrl->iorcsz); @@ -3143,8 +3510,9 @@ static int nvme_check_ctrl_fabric_info(struct nvme_ctrl *ctrl, struct nvme_id_ct } if (!ctrl->maxcmd) { - dev_err(ctrl->device, "Maximum outstanding commands is 0\n"); - return -EINVAL; + dev_warn(ctrl->device, + "Firmware bug: maximum outstanding commands is 0\n"); + ctrl->maxcmd = ctrl->sqsize + 1; } return 0; @@ -3220,7 +3588,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) min_not_zero(ctrl->max_hw_sectors, max_hw_sectors); lim = queue_limits_start_update(ctrl->admin_q); - nvme_set_ctrl_limits(ctrl, &lim); + nvme_set_ctrl_limits(ctrl, &lim, true); ret = queue_limits_commit_update(ctrl->admin_q, &lim); if (ret) goto out_free; @@ -3286,7 +3654,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl) dev_pm_qos_expose_latency_tolerance(ctrl->device); else if (!ctrl->apst_enabled && prev_apst_enabled) dev_pm_qos_hide_latency_tolerance(ctrl->device); - out_free: kfree(id); return ret; @@ -3316,6 +3683,17 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended) if (ret) return ret; + if (nvme_admin_ctrl(ctrl)) { + /* + * An admin controller has one admin queue, but no I/O queues. + * Override queue_count so it only creates an admin queue. 
+ */ + dev_dbg(ctrl->device, + "Subsystem %s is an administrative controller", + ctrl->subsys->subnqn); + ctrl->queue_count = 1; + } + ret = nvme_configure_apst(ctrl); if (ret < 0) return ret; @@ -3405,7 +3783,7 @@ static struct nvme_ns_head *nvme_find_ns_head(struct nvme_ctrl *ctrl, */ if (h->ns_id != nsid || !nvme_is_unique_nsid(ctrl, h)) continue; - if (!list_empty(&h->list) && nvme_tryget_ns_head(h)) + if (nvme_tryget_ns_head(h)) return h; } @@ -3529,6 +3907,7 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, head->ns_id = info->nsid; head->ids = info->ids; head->shared = info->is_shared; + head->rotational = info->is_rotational; ratelimit_state_init(&head->rs_nuse, 5 * HZ, 1); ratelimit_set_flags(&head->rs_nuse, RATELIMIT_MSG_ON_RELEASE); kref_init(&head->ref); @@ -3648,7 +4027,8 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) } } else { ret = -EINVAL; - if (!info->is_shared || !head->shared) { + if ((!info->is_shared || !head->shared) && + !list_empty(&head->list)) { dev_err(ctrl->device, "Duplicate unshared namespace %d\n", info->nsid); @@ -3666,13 +4046,17 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info) "Found shared namespace %d, but multipathing not supported.\n", info->nsid); dev_warn_once(ctrl->device, - "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n"); + "Shared namespace support requires core_nvme.multipath=Y.\n"); } } list_add_tail_rcu(&ns->siblings, &head->list); ns->head = head; mutex_unlock(&ctrl->subsys->lock); + +#ifdef CONFIG_NVME_MULTIPATH + cancel_delayed_work(&head->remove_work); +#endif return 0; out_put_ns_head: @@ -3688,7 +4072,8 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { if (ns->head->ns_id == nsid) { if (!nvme_get_ns(ns)) continue; @@ -3701,7 +4086,7 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid) srcu_read_unlock(&ctrl->srcu, srcu_idx); return ret; } -EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU); +EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, "NVME_TARGET_PASSTHRU"); /* * Add the namespace to the controller list while keeping the list ordered. 
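The nvme_find_get_ns() hunk above is the first of several in this patch that move from list_for_each_entry_rcu() to list_for_each_entry_srcu(), passing srcu_read_lock_held() so the iterator's lockdep condition matches the SRCU read side actually held. Condensed to its essentials, the pattern looks like this (a kernel-style sketch of the function above, not new code):

static struct nvme_ns *find_ns_sketch(struct nvme_ctrl *ctrl, unsigned int nsid)
{
	struct nvme_ns *ns, *found = NULL;
	int idx;

	idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu)) {
		if (ns->head->ns_id == nsid && nvme_get_ns(ns)) {
			found = ns;		/* reference taken under SRCU */
			break;
		}
	}
	srcu_read_unlock(&ctrl->srcu, idx);
	return found;
}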
@@ -3716,20 +4101,28 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) return; } } - list_add(&ns->list, &ns->ctrl->namespaces); + list_add_rcu(&ns->list, &ns->ctrl->namespaces); } static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) { + struct queue_limits lim = { }; struct nvme_ns *ns; struct gendisk *disk; int node = ctrl->numa_node; + bool last_path = false; ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); if (!ns) return; - disk = blk_mq_alloc_disk(ctrl->tagset, NULL, ns); + if (ctrl->opts && ctrl->opts->data_digest) + lim.features |= BLK_FEAT_STABLE_WRITES; + if (ctrl->ops->supports_pci_p2pdma && + ctrl->ops->supports_pci_p2pdma(ctrl)) + lim.features |= BLK_FEAT_PCI_P2PDMA; + + disk = blk_mq_alloc_disk(ctrl->tagset, &lim, ns); if (IS_ERR(disk)) goto out_free_ns; disk->fops = &nvme_bdev_ops; @@ -3737,15 +4130,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) ns->disk = disk; ns->queue = disk->queue; - - if (ctrl->opts && ctrl->opts->data_digest) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, ns->queue); - - blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); - if (ctrl->ops->supports_pci_p2pdma && - ctrl->ops->supports_pci_p2pdma(ctrl)) - blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue); - ns->ctrl = ctrl; kref_init(&ns->kref); @@ -3819,9 +4203,22 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) out_unlink_ns: mutex_lock(&ctrl->subsys->lock); list_del_rcu(&ns->siblings); - if (list_empty(&ns->head->list)) + if (list_empty(&ns->head->list)) { list_del_init(&ns->head->entry); + /* + * If multipath is not configured, we still create a namespace + * head (nshead), but head->disk is not initialized in that + * case. As a result, only a single reference to nshead is held + * (via kref_init()) when it is created. Therefore, ensure that + * we do not release the reference to nshead twice if head->disk + * is not present. + */ + if (ns->head->disk) + last_path = true; + } mutex_unlock(&ctrl->subsys->lock); + if (last_path) + nvme_put_ns_head(ns->head); nvme_put_ns_head(ns->head); out_cleanup_disk: put_disk(disk); @@ -3853,7 +4250,8 @@ static void nvme_ns_remove(struct nvme_ns *ns) mutex_lock(&ns->ctrl->subsys->lock); list_del_rcu(&ns->siblings); if (list_empty(&ns->head->list)) { - list_del_init(&ns->head->entry); + if (!nvme_mpath_queue_if_no_path(ns->head)) + list_del_init(&ns->head->entry); last_path = true; } mutex_unlock(&ns->ctrl->subsys->lock); @@ -3863,6 +4261,9 @@ static void nvme_ns_remove(struct nvme_ns *ns) if (!nvme_ns_head_multipath(ns->head)) nvme_cdev_del(&ns->cdev, &ns->cdev_device); + + nvme_mpath_remove_sysfs_link(ns); + del_gendisk(ns->disk); mutex_lock(&ns->ctrl->namespaces_lock); @@ -3871,7 +4272,7 @@ static void nvme_ns_remove(struct nvme_ns *ns) synchronize_srcu(&ns->ctrl->srcu); if (last_path) - nvme_mpath_shutdown_disk(ns->head); + nvme_mpath_remove_disk(ns->head); nvme_put_ns(ns); } @@ -3887,7 +4288,7 @@ static void nvme_ns_remove_by_nsid(struct nvme_ctrl *ctrl, u32 nsid) static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_info *info) { - int ret = NVME_SC_INVALID_NS | NVME_SC_DNR; + int ret = NVME_SC_INVALID_NS | NVME_STATUS_DNR; if (!nvme_ns_ids_equal(&ns->head->ids, &info->ids)) { dev_err(ns->ctrl->device, @@ -3903,7 +4304,7 @@ out: * * TODO: we should probably schedule a delayed retry here. 
*/ - if (ret > 0 && (ret & NVME_SC_DNR)) + if (ret > 0 && (ret & NVME_STATUS_DNR)) nvme_ns_remove(ns); } @@ -3911,7 +4312,7 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns_info info = { .nsid = nsid }; struct nvme_ns *ns; - int ret; + int ret = 1; if (nvme_identify_ns_descs(ctrl, &info)) return; @@ -3923,14 +4324,15 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) } /* - * If available try to use the Command Set Idependent Identify Namespace + * If available try to use the Command Set Independent Identify Namespace * data structure to find all the generic information that is needed to * set up a namespace. If not fall back to the legacy version. */ if ((ctrl->cap & NVME_CAP_CRMS_CRIMS) || - (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS)) + (info.ids.csi != NVME_CSI_NVM && info.ids.csi != NVME_CSI_ZNS) || + ctrl->vs >= NVME_VS(2, 0, 0)) ret = nvme_ns_info_from_id_cs_indep(ctrl, &info); - else + if (ret > 0) ret = nvme_ns_info_from_identify(ctrl, &info); if (info.is_removed) @@ -3952,6 +4354,35 @@ static void nvme_scan_ns(struct nvme_ctrl *ctrl, unsigned nsid) } } +/** + * struct async_scan_info - keeps track of controller & NSIDs to scan + * @ctrl: Controller on which namespaces are being scanned + * @next_nsid: Index of next NSID to scan in ns_list + * @ns_list: Pointer to list of NSIDs to scan + * + * Note: There is a single async_scan_info structure shared by all instances + * of nvme_scan_ns_async() scanning a given controller, so the atomic + * operations on next_nsid are critical to ensure each instance scans a unique + * NSID. + */ +struct async_scan_info { + struct nvme_ctrl *ctrl; + atomic_t next_nsid; + __le32 *ns_list; +}; + +static void nvme_scan_ns_async(void *data, async_cookie_t cookie) +{ + struct async_scan_info *scan_info = data; + int idx; + u32 nsid; + + idx = (u32)atomic_fetch_inc(&scan_info->next_nsid); + nsid = le32_to_cpu(scan_info->ns_list[idx]); + + nvme_scan_ns(scan_info->ctrl, nsid); +} + static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl, unsigned nsid) { @@ -3978,11 +4409,15 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) __le32 *ns_list; u32 prev = 0; int ret = 0, i; + ASYNC_DOMAIN(domain); + struct async_scan_info scan_info; ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL); if (!ns_list) return -ENOMEM; + scan_info.ctrl = ctrl; + scan_info.ns_list = ns_list; for (;;) { struct nvme_command cmd = { .identify.opcode = nvme_admin_identify, @@ -3998,19 +4433,23 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl) goto free; } + atomic_set(&scan_info.next_nsid, 0); for (i = 0; i < nr_entries; i++) { u32 nsid = le32_to_cpu(ns_list[i]); if (!nsid) /* end of the list? */ goto out; - nvme_scan_ns(ctrl, nsid); + async_schedule_domain(nvme_scan_ns_async, &scan_info, + &domain); while (++prev < nsid) nvme_ns_remove_by_nsid(ctrl, prev); } + async_synchronize_full_domain(&domain); } out: nvme_remove_invalid_namespaces(ctrl, prev); free: + async_synchronize_full_domain(&domain); kfree(ns_list); return ret; } @@ -4086,7 +4525,7 @@ static void nvme_scan_work(struct work_struct *work) } mutex_lock(&ctrl->scan_lock); - if (nvme_ctrl_limited_cns(ctrl)) { + if (!nvme_id_cns_ok(ctrl, NVME_ID_CNS_NS_ACTIVE_LIST)) { nvme_scan_ns_sequential(ctrl); } else { /* @@ -4095,10 +4534,19 @@ static void nvme_scan_work(struct work_struct *work) * they report) but don't actually support it. 
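The async scanning added above is, at heart, a fan-out/join: one shared context, an atomic cursor so every callback claims a unique slot, and an async domain to wait on. A stripped-down sketch of just that shape (the NSID bookkeeping and error handling of the real code are omitted):

#include <linux/async.h>
#include <linux/atomic.h>
#include <linux/types.h>

struct scan_ctx {
	atomic_t next;			/* cursor shared by all callbacks */
	u32 *items;
	void (*process)(u32 item);
};

static void scan_one(void *data, async_cookie_t cookie)
{
	struct scan_ctx *ctx = data;
	int idx = atomic_fetch_inc(&ctx->next);	/* claim a unique slot */

	ctx->process(ctx->items[idx]);
}

static void scan_all(struct scan_ctx *ctx, int nr)
{
	ASYNC_DOMAIN(domain);
	int i;

	atomic_set(&ctx->next, 0);
	for (i = 0; i < nr; i++)
		async_schedule_domain(scan_one, ctx, &domain);
	async_synchronize_full_domain(&domain);	/* join before returning */
}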
*/ ret = nvme_scan_ns_list(ctrl); - if (ret > 0 && ret & NVME_SC_DNR) + if (ret > 0 && ret & NVME_STATUS_DNR) nvme_scan_ns_sequential(ctrl); } mutex_unlock(&ctrl->scan_lock); + + /* Requeue if we have missed AENs */ + if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) + nvme_queue_scan(ctrl); +#ifdef CONFIG_NVME_MULTIPATH + else if (ctrl->ana_log_buf) + /* Re-read the ANA log page to not miss updates */ + queue_work(nvme_wq, &ctrl->ana_work); +#endif } /* @@ -4273,11 +4721,9 @@ static void nvme_fw_act_work(struct work_struct *work) nvme_auth_stop(ctrl); if (ctrl->mtfa) - fw_act_timeout = jiffies + - msecs_to_jiffies(ctrl->mtfa * 100); + fw_act_timeout = jiffies + msecs_to_jiffies(ctrl->mtfa * 100); else - fw_act_timeout = jiffies + - msecs_to_jiffies(admin_timeout * 1000); + fw_act_timeout = jiffies + secs_to_jiffies(admin_timeout); nvme_quiesce_io_queues(ctrl); while (nvme_ctrl_pp_status(ctrl)) { @@ -4290,7 +4736,8 @@ static void nvme_fw_act_work(struct work_struct *work) msleep(100); } - if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) || + !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) return; nvme_unquiesce_io_queues(ctrl); @@ -4349,7 +4796,8 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl) { - dev_warn(ctrl->device, "resetting controller due to AER\n"); + dev_warn(ctrl->device, + "resetting controller due to persistent internal error\n"); nvme_reset_ctrl(ctrl); } @@ -4406,7 +4854,6 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, /* Reserved for fabric connect and keep alive */ set->reserved_tags = 2; set->numa_node = ctrl->numa_node; - set->flags = BLK_MQ_F_NO_SCHED; if (ctrl->ops->flags & NVME_F_BLOCKING) set->flags |= BLK_MQ_F_BLOCKING; set->cmd_size = cmd_size; @@ -4447,8 +4894,12 @@ EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set); void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl) { + /* + * As we're about to destroy the queue and free tagset + * we can not have keep-alive work running. 
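A note on the nvme_fw_act_work() timeout above: MTFA is reported by the controller in units of 100 ms, and when it is zero the driver falls back to the admin command timeout, which is kept in seconds. The two branches collapse to a helper along these lines (a sketch, not driver code):

/* mtfa: Identify Controller MTFA, in 100 ms units; admin_timeout in seconds. */
static unsigned long fw_activation_deadline(u16 mtfa, unsigned int admin_timeout)
{
	if (mtfa)
		return jiffies + msecs_to_jiffies(mtfa * 100);
	return jiffies + secs_to_jiffies(admin_timeout);
}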
+ */ + nvme_stop_keep_alive(ctrl); blk_mq_destroy_queue(ctrl->admin_q); - blk_put_queue(ctrl->admin_q); if (ctrl->ops->flags & NVME_F_FABRICS) { blk_mq_destroy_queue(ctrl->fabrics_q); blk_put_queue(ctrl->fabrics_q); @@ -4476,10 +4927,9 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, /* Reserved for fabric connect */ set->reserved_tags = 1; set->numa_node = ctrl->numa_node; - set->flags = BLK_MQ_F_SHOULD_MERGE; if (ctrl->ops->flags & NVME_F_BLOCKING) set->flags |= BLK_MQ_F_BLOCKING; - set->cmd_size = cmd_size, + set->cmd_size = cmd_size; set->driver_data = ctrl; set->nr_hw_queues = ctrl->queue_count - 1; set->timeout = NVME_IO_TIMEOUT; @@ -4489,13 +4939,15 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return ret; if (ctrl->ops->flags & NVME_F_FABRICS) { - ctrl->connect_q = blk_mq_alloc_queue(set, NULL, NULL); + struct queue_limits lim = { + .features = BLK_FEAT_SKIP_TAGSET_QUIESCE, + }; + + ctrl->connect_q = blk_mq_alloc_queue(set, &lim, NULL); if (IS_ERR(ctrl->connect_q)) { ret = PTR_ERR(ctrl->connect_q); goto out_free_tag_set; } - blk_queue_flag_set(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, - ctrl->connect_q); } ctrl->tagset = set; @@ -4522,7 +4974,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) { nvme_mpath_stop(ctrl); nvme_auth_stop(ctrl); - nvme_stop_keep_alive(ctrl); nvme_stop_failfast_work(ctrl); flush_work(&ctrl->async_event_work); cancel_work_sync(&ctrl->fw_act_work); @@ -4542,8 +4993,14 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl) * checking that they started once before, hence are reconnecting back. */ if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) && - nvme_discovery_ctrl(ctrl)) + nvme_discovery_ctrl(ctrl)) { + if (!ctrl->kato) { + nvme_stop_keep_alive(ctrl); + ctrl->kato = NVME_DEFAULT_KATO; + nvme_start_keep_alive(ctrl); + } nvme_change_uevent(ctrl, "NVME_EVENT=rediscover"); + } if (ctrl->queue_count > 1) { nvme_queue_scan(ctrl); @@ -4558,6 +5015,7 @@ EXPORT_SYMBOL_GPL(nvme_start_ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) { + nvme_stop_keep_alive(ctrl); nvme_hwmon_exit(ctrl); nvme_fault_inject_fini(&ctrl->fault_inject); dev_pm_qos_hide_latency_tolerance(ctrl->device); @@ -4585,9 +5043,10 @@ static void nvme_free_ctrl(struct device *dev) container_of(dev, struct nvme_ctrl, ctrl_device); struct nvme_subsystem *subsys = ctrl->subsys; + if (ctrl->admin_q) + blk_put_queue(ctrl->admin_q); if (!subsys || ctrl->instance != subsys->instance) ida_free(&nvme_instance_ida, ctrl->instance); - key_put(ctrl->tls_key); nvme_free_cels(ctrl); nvme_mpath_uninit(ctrl); cleanup_srcu_struct(&ctrl->srcu); @@ -4613,6 +5072,9 @@ static void nvme_free_ctrl(struct device *dev) * Initialize a NVMe controller structures. This needs to be called during * earliest initialization so that we have the initialized structured around * during probing. + * + * On success, the caller must use the nvme_put_ctrl() to release this when + * needed, which also invokes the ops->free_ctrl() callback. 
*/ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, const struct nvme_ctrl_ops *ops, unsigned long quirks) @@ -4661,6 +5123,12 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, goto out; ctrl->instance = ret; + ret = nvme_auth_init_ctrl(ctrl); + if (ret) + goto out_release_instance; + + nvme_mpath_init_ctrl(ctrl); + device_initialize(&ctrl->ctrl_device); ctrl->device = &ctrl->ctrl_device; ctrl->device->devt = MKDEV(MAJOR(nvme_ctrl_base_chr_devt), @@ -4673,16 +5141,36 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, ctrl->device->groups = nvme_dev_attr_groups; ctrl->device->release = nvme_free_ctrl; dev_set_drvdata(ctrl->device, ctrl); + + return ret; + +out_release_instance: + ida_free(&nvme_instance_ida, ctrl->instance); +out: + if (ctrl->discard_page) + __free_page(ctrl->discard_page); + cleanup_srcu_struct(&ctrl->srcu); + return ret; +} +EXPORT_SYMBOL_GPL(nvme_init_ctrl); + +/* + * On success, returns with an elevated controller reference and caller must + * use nvme_uninit_ctrl() to properly free resources associated with the ctrl. + */ +int nvme_add_ctrl(struct nvme_ctrl *ctrl) +{ + int ret; + ret = dev_set_name(ctrl->device, "nvme%d", ctrl->instance); if (ret) - goto out_release_instance; + return ret; - nvme_get_ctrl(ctrl); cdev_init(&ctrl->cdev, &nvme_dev_fops); - ctrl->cdev.owner = ops->module; + ctrl->cdev.owner = ctrl->ops->module; ret = cdev_device_add(&ctrl->cdev, ctrl->device); if (ret) - goto out_free_name; + return ret; /* * Initialize latency tolerance controls. The sysfs files won't @@ -4693,28 +5181,11 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, min(default_ps_max_latency_us, (unsigned long)S32_MAX)); nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device)); - nvme_mpath_init_ctrl(ctrl); - ret = nvme_auth_init_ctrl(ctrl); - if (ret) - goto out_free_cdev; + nvme_get_ctrl(ctrl); return 0; -out_free_cdev: - nvme_fault_inject_fini(&ctrl->fault_inject); - dev_pm_qos_hide_latency_tolerance(ctrl->device); - cdev_device_del(&ctrl->cdev, ctrl->device); -out_free_name: - nvme_put_ctrl(ctrl); - kfree_const(ctrl->device->kobj.name); -out_release_instance: - ida_free(&nvme_instance_ida, ctrl->instance); -out: - if (ctrl->discard_page) - __free_page(ctrl->discard_page); - cleanup_srcu_struct(&ctrl->srcu); - return ret; } -EXPORT_SYMBOL_GPL(nvme_init_ctrl); +EXPORT_SYMBOL_GPL(nvme_add_ctrl); /* let I/O to all namespaces fail in preparation for surprise removal */ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) @@ -4723,7 +5194,8 @@ void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mark_disk_dead(ns->disk); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4735,8 +5207,9 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) - blk_mq_unfreeze_queue(ns->queue); + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) + blk_mq_unfreeze_queue_non_owner(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); clear_bit(NVME_CTRL_FROZEN, &ctrl->flags); } @@ -4748,7 +5221,8 @@ int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + 
list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout); if (timeout <= 0) break; @@ -4764,7 +5238,8 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_mq_freeze_queue_wait(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4777,8 +5252,14 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl) set_bit(NVME_CTRL_FROZEN, &ctrl->flags); srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) - blk_freeze_queue_start(ns->queue); + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) + /* + * Typical non_owner use case is from pci driver, in which + * start_freeze is called from timeout work function, but + * unfreeze is done in reset work context + */ + blk_freeze_queue_start_non_owner(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } EXPORT_SYMBOL_GPL(nvme_start_freeze); @@ -4825,7 +5306,8 @@ void nvme_sync_io_queues(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) blk_sync_queue(ns->queue); srcu_read_unlock(&ctrl->srcu, srcu_idx); } @@ -4845,7 +5327,7 @@ struct nvme_ctrl *nvme_ctrl_from_file(struct file *file) return NULL; return file->private_data; } -EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, NVME_TARGET_PASSTHRU); +EXPORT_SYMBOL_NS_GPL(nvme_ctrl_from_file, "NVME_TARGET_PASSTHRU"); /* * Check we didn't inadvertently grow the command structure sizes: @@ -4873,6 +5355,8 @@ static inline void _nvme_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_id_ctrl_nvm) != NVME_IDENTIFY_DATA_SIZE); BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64); BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512); + BUILD_BUG_ON(sizeof(struct nvme_endurance_group_log) != 512); + BUILD_BUG_ON(sizeof(struct nvme_rotational_media_log) != 512); BUILD_BUG_ON(sizeof(struct nvme_dbbuf) != 64); BUILD_BUG_ON(sizeof(struct nvme_directive_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_feat_host_behavior) != 512); @@ -4881,22 +5365,20 @@ static inline void _nvme_check_size(void) static int __init nvme_core_init(void) { + unsigned int wq_flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS; int result = -ENOMEM; _nvme_check_size(); - nvme_wq = alloc_workqueue("nvme-wq", - WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); + nvme_wq = alloc_workqueue("nvme-wq", wq_flags, 0); if (!nvme_wq) goto out; - nvme_reset_wq = alloc_workqueue("nvme-reset-wq", - WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); + nvme_reset_wq = alloc_workqueue("nvme-reset-wq", wq_flags, 0); if (!nvme_reset_wq) goto destroy_wq; - nvme_delete_wq = alloc_workqueue("nvme-delete-wq", - WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0); + nvme_delete_wq = alloc_workqueue("nvme-delete-wq", wq_flags, 0); if (!nvme_delete_wq) goto destroy_reset_wq; diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index ceb9c0ed3120..55a8afd2efd5 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -187,7 +187,7 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? 
ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } @@ -233,7 +233,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) if (unlikely(ret != 0)) dev_err(ctrl->device, "Property Get error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_read64); @@ -275,11 +275,26 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) if (unlikely(ret)) dev_err(ctrl->device, "Property Set error: %d, offset %#x\n", - ret > 0 ? ret & ~NVME_SC_DNR : ret, off); + ret > 0 ? ret & ~NVME_STATUS_DNR : ret, off); return ret; } EXPORT_SYMBOL_GPL(nvmf_reg_write32); +int nvmf_subsystem_reset(struct nvme_ctrl *ctrl) +{ + int ret; + + if (!nvme_wait_reset(ctrl)) + return -EBUSY; + + ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, NVME_SUBSYS_RESET); + if (ret) + return ret; + + return nvme_try_sched_reset(ctrl); +} +EXPORT_SYMBOL_GPL(nvmf_subsystem_reset); + /** * nvmf_log_connect_error() - Error-parsing-diagnostic print out function for * connect() errors. @@ -295,7 +310,7 @@ static void nvmf_log_connect_error(struct nvme_ctrl *ctrl, int errval, int offset, struct nvme_command *cmd, struct nvmf_connect_data *data) { - int err_sctype = errval & ~NVME_SC_DNR; + int err_sctype = errval & ~NVME_STATUS_DNR; if (errval < 0) { dev_err(ctrl->device, @@ -457,8 +472,9 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl) result = le32_to_cpu(res.u32); ctrl->cntlid = result & 0xFFFF; if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) { - /* Secure concatenation is not implemented */ - if (result & NVME_CONNECT_AUTHREQ_ASCR) { + /* Check for secure concatenation */ + if ((result & NVME_CONNECT_AUTHREQ_ASCR) && + !ctrl->opts->concat) { dev_warn(ctrl->device, "qid 0: secure concatenation is not supported\n"); ret = -EOPNOTSUPP; @@ -535,7 +551,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid) /* Secure concatenation is not implemented */ if (result & NVME_CONNECT_AUTHREQ_ASCR) { dev_warn(ctrl->device, - "qid 0: secure concatenation is not supported\n"); + "qid %d: secure concatenation is not supported\n", qid); ret = -EOPNOTSUPP; goto out_free_data; } @@ -566,17 +582,17 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue); * Do not retry when: * * - the DNR bit is set and the specification states no further connect - * attempts with the same set of paramenters should be attempted. + * attempts with the same set of parameters should be attempted. * * - when the authentication attempt fails, because the key was invalid. * This error code is set on the host side. 
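For reference while reading the NVME_SC_DNR to NVME_STATUS_DNR renames throughout this patch: the driver works on the completion status with the phase bit stripped, and the bit positions follow from the open-coded masks (>> 8 & 7, & 0xff, 0x7ff) that the earlier core.c hunks replaced. A small restatement, with the literal values written out instead of the kernel's symbolic names:

/* nvme_req(req)->status layout (phase bit already shifted out):
 *   bits  7:0  SC    - Status Code
 *   bits 10:8  SCT   - Status Code Type
 *   bit  13    MORE  - more status information available
 *   bit  14    DNR   - Do Not Retry
 */
static inline bool status_is_permanent(unsigned short status)
{
	return status & 0x4000;		/* DNR set: retrying the same command is futile */
}

static inline unsigned short status_sct_sc(unsigned short status)
{
	return status & 0x7ff;		/* SCT + SC, as in the nvme_decide_disposition() hunk */
}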
*/ bool nvmf_should_reconnect(struct nvme_ctrl *ctrl, int status) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) return false; - if (status == -EKEYREJECTED) + if (status == -EKEYREJECTED || status == -ENOKEY) return false; if (ctrl->opts->max_reconnects == -1 || @@ -650,7 +666,7 @@ static struct key *nvmf_parse_key(int key_id) return ERR_PTR(-EINVAL); } - key = key_lookup(key_id); + key = nvme_tls_key_lookup(key_id); if (IS_ERR(key)) pr_err("key id %08x not found\n", key_id); else @@ -691,6 +707,7 @@ static const match_table_t opt_tokens = { #endif #ifdef CONFIG_NVME_TCP_TLS { NVMF_OPT_TLS, "tls" }, + { NVMF_OPT_CONCAT, "concat" }, #endif { NVMF_OPT_ERR, NULL } }; @@ -720,6 +737,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, opts->tls = false; opts->tls_key = NULL; opts->keyring = NULL; + opts->concat = false; options = o = kstrdup(buf, GFP_KERNEL); if (!options) @@ -1038,6 +1056,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, } opts->tls = true; break; + case NVMF_OPT_CONCAT: + if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) { + pr_err("TLS is not supported\n"); + ret = -EINVAL; + goto out; + } + opts->concat = true; + break; default: pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n", p); @@ -1064,6 +1090,23 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, pr_warn("failfast tmo (%d) larger than controller loss tmo (%d)\n", opts->fast_io_fail_tmo, ctrl_loss_tmo); } + if (opts->concat) { + if (opts->tls) { + pr_err("Secure concatenation over TLS is not supported\n"); + ret = -EINVAL; + goto out; + } + if (opts->tls_key) { + pr_err("Cannot specify a TLS key for secure concatenation\n"); + ret = -EINVAL; + goto out; + } + if (!opts->dhchap_secret) { + pr_err("Need to enable DH-CHAP for secure concatenation\n"); + ret = -EINVAL; + goto out; + } + } opts->host = nvmf_host_add(hostnqn, &hostid); if (IS_ERR(opts->host)) { @@ -1388,10 +1431,10 @@ static void __nvmf_concat_opt_tokens(struct seq_file *seq_file) tok = &opt_tokens[idx]; if (tok->token == NVMF_OPT_ERR) continue; - seq_puts(seq_file, ","); + seq_putc(seq_file, ','); seq_puts(seq_file, tok->pattern); } - seq_puts(seq_file, "\n"); + seq_putc(seq_file, '\n'); } static int nvmf_dev_show(struct seq_file *seq_file, void *private) diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 602135910ae9..caf5503d0833 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -66,6 +66,7 @@ enum { NVMF_OPT_TLS = 1 << 25, NVMF_OPT_KEYRING = 1 << 26, NVMF_OPT_TLS_KEY = 1 << 27, + NVMF_OPT_CONCAT = 1 << 28, }; /** @@ -79,7 +80,7 @@ enum { * @transport: Holds the fabric transport "technology name" (for a lack of * better description) that will be used by an NVMe controller * being added. - * @subsysnqn: Hold the fully qualified NQN subystem name (format defined + * @subsysnqn: Hold the fully qualified NQN subsystem name (format defined * in the NVMe specification, "NVMe Qualified Names"). * @traddr: The transport-specific TRADDR field for a port on the * subsystem which is adding a controller. 
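/*
 * Editor's note (not part of the patch): a minimal usage sketch for the new
 * "concat" fabrics option parsed above, in the form of an options string as
 * it might be written to /dev/nvme-fabrics. The transport address, NQNs and
 * DH-CHAP secret are hypothetical placeholders. Per the validation added to
 * nvmf_parse_options(), "concat" requires a DH-CHAP secret and must not be
 * combined with an explicit "tls" option or a TLS key.
 */
static const char example_concat_connect_opts[] __maybe_unused =
	"transport=tcp,traddr=192.0.2.1,trsvcid=4420,"
	"nqn=nqn.2014-08.org.example:subsys1,"
	"hostnqn=nqn.2014-08.org.example:host1,"
	"dhchap_secret=DHHC-1:00:<base64-secret>:,concat";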
@@ -101,6 +102,7 @@ enum { * @keyring: Keyring to use for key lookups * @tls_key: TLS key for encrypted connections (TCP) * @tls: Start TLS encrypted connections (TCP) + * @concat: Enabled Secure channel concatenation (TCP) * @disable_sqflow: disable controller sq flow control * @hdr_digest: generate/verify header digest (TCP) * @data_digest: generate/verify data digest (TCP) @@ -130,6 +132,7 @@ struct nvmf_ctrl_options { struct key *keyring; struct key *tls_key; bool tls; + bool concat; bool disable_sqflow; bool hdr_digest; bool data_digest; @@ -153,7 +156,7 @@ struct nvmf_ctrl_options { * @create_ctrl(): function pointer that points to a non-NVMe * implementation-specific fabric technology * that would go into starting up that fabric - * for the purpose of conneciton to an NVMe controller + * for the purpose of connection to an NVMe controller * using that fabric technology. * * Notes: @@ -162,7 +165,7 @@ struct nvmf_ctrl_options { * 2. create_ctrl() must be defined (even if it does nothing) * 3. struct nvmf_transport_ops must be statically allocated in the * modules .bss section so that a pure module_get on @module - * prevents the memory from beeing freed. + * prevents the memory from being freed. */ struct nvmf_transport_ops { struct list_head entry; @@ -214,9 +217,16 @@ static inline unsigned int nvmf_nr_io_queues(struct nvmf_ctrl_options *opts) min(opts->nr_poll_queues, num_online_cpus()); } +static inline unsigned long nvmf_get_virt_boundary(struct nvme_ctrl *ctrl, + bool is_admin) +{ + return 0; +} + int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val); int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val); int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val); +int nvmf_subsystem_reset(struct nvme_ctrl *ctrl); int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl); int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid); int nvmf_register_transport(struct nvmf_transport_ops *ops); diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c index 1ba10a5c656d..105d6cb41c72 100644 --- a/drivers/nvme/host/fault_inject.c +++ b/drivers/nvme/host/fault_inject.c @@ -6,6 +6,7 @@ */ #include <linux/moduleparam.h> +#include <linux/debugfs.h> #include "nvme.h" static DECLARE_FAULT_ATTR(fail_default_attr); @@ -75,7 +76,7 @@ void nvme_should_fail(struct request *req) /* inject status code and DNR bit */ status = fault_inject->status; if (fault_inject->dont_retry) - status |= NVME_SC_DNR; + status |= NVME_STATUS_DNR; nvme_req(req)->status = status; } } diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index f0b081332749..bc455fa98246 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -16,7 +16,6 @@ #include <linux/nvme-fc.h> #include "fc.h" #include <scsi/scsi_transport_fc.h> -#include <linux/blk-mq-pci.h> /* *************************** Data Structures/Defines ****************** */ @@ -521,6 +520,8 @@ nvme_fc_free_rport(struct kref *ref) WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); WARN_ON(!list_empty(&rport->ctrl_list)); + WARN_ON(!list_empty(&rport->ls_req_list)); + WARN_ON(!list_empty(&rport->ls_rcv_list)); /* remove from lport list */ spin_lock_irqsave(&nvme_fc_lock, flags); @@ -786,49 +787,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) "NVME-FC{%d}: controller connectivity lost. Awaiting " "Reconnect", ctrl->cnum); - switch (nvme_ctrl_state(&ctrl->ctrl)) { - case NVME_CTRL_NEW: - case NVME_CTRL_LIVE: - /* - * Schedule a controller reset. 
The reset will terminate the - * association and schedule the reconnect timer. Reconnects - * will be attempted until either the ctlr_loss_tmo - * (max_retries * connect_delay) expires or the remoteport's - * dev_loss_tmo expires. - */ - if (nvme_reset_ctrl(&ctrl->ctrl)) { - dev_warn(ctrl->ctrl.device, - "NVME-FC{%d}: Couldn't schedule reset.\n", - ctrl->cnum); - nvme_delete_ctrl(&ctrl->ctrl); - } - break; - - case NVME_CTRL_CONNECTING: - /* - * The association has already been terminated and the - * controller is attempting reconnects. No need to do anything - * futher. Reconnects will be attempted until either the - * ctlr_loss_tmo (max_retries * connect_delay) expires or the - * remoteport's dev_loss_tmo expires. - */ - break; - - case NVME_CTRL_RESETTING: - /* - * Controller is already in the process of terminating the - * association. No need to do anything further. The reconnect - * step will kick in naturally after the association is - * terminated. - */ - break; - - case NVME_CTRL_DELETING: - case NVME_CTRL_DELETING_NOIO: - default: - /* no action to take - let it delete */ - break; - } + set_bit(ASSOC_FAILED, &ctrl->flags); + nvme_reset_ctrl(&ctrl->ctrl); } /** @@ -941,7 +901,7 @@ EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss); * may crash. * * As such: - * Wrapper all the dma routines and check the dev pointer. + * Wrap all the dma routines and check the dev pointer. * * If simple mappings (return just a dma address, we'll noop them, * returning a dma address of 0. @@ -1405,7 +1365,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * - * The behavior of the fc-nvme initiator is such that it's + * The behavior of the fc-nvme initiator is such that its * understanding of the association and connections will implicitly * be torn down. 
The action is implicit as it may be due to a loss of * connectivity with the fc-nvme target, so you may never get a @@ -1452,9 +1412,8 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) } static void -nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) +nvme_fc_xmt_ls_rsp_free(struct nvmefc_ls_rcv_op *lsop) { - struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; struct nvme_fc_rport *rport = lsop->rport; struct nvme_fc_lport *lport = rport->lport; unsigned long flags; @@ -1476,6 +1435,14 @@ nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) } static void +nvme_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp) +{ + struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; + + nvme_fc_xmt_ls_rsp_free(lsop); +} + +static void nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) { struct nvme_fc_rport *rport = lsop->rport; @@ -1492,7 +1459,7 @@ nvme_fc_xmt_ls_rsp(struct nvmefc_ls_rcv_op *lsop) dev_warn(lport->dev, "LLDD rejected LS RSP xmt: LS %d status %d\n", w0->ls_cmd, ret); - nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); + nvme_fc_xmt_ls_rsp_free(lsop); return; } } @@ -1503,14 +1470,14 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, { struct fcnvme_ls_disconnect_assoc_rqst *rqst = &lsop->rqstbuf->rq_dis_assoc; - struct nvme_fc_ctrl *ctrl, *ret = NULL; + struct nvme_fc_ctrl *ctrl, *tmp, *ret = NULL; struct nvmefc_ls_rcv_op *oldls = NULL; u64 association_id = be64_to_cpu(rqst->associd.association_id); unsigned long flags; spin_lock_irqsave(&rport->lock, flags); - list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { + list_for_each_entry_safe(ctrl, tmp, &rport->ctrl_list, ctrl_list) { if (!nvme_fc_ctrl_get(ctrl)) continue; spin_lock(&ctrl->lock); @@ -1523,7 +1490,9 @@ nvme_fc_match_disconn_ls(struct nvme_fc_rport *rport, if (ret) /* leave the ctrl get reference */ break; + spin_unlock_irqrestore(&rport->lock, flags); nvme_fc_ctrl_put(ctrl); + spin_lock_irqsave(&rport->lock, flags); } spin_unlock_irqrestore(&rport->lock, flags); @@ -1990,8 +1959,8 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req) } /* - * For the linux implementation, if we have an unsuccesful - * status, they blk-mq layer can typically be called with the + * For the linux implementation, if we have an unsuccessful + * status, the blk-mq layer can typically be called with the * non-zero status and the content of the cqe isn't important. */ if (status) @@ -2080,7 +2049,8 @@ done: nvme_fc_complete_rq(rq); check_error: - if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) + if (terminate_assoc && + nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING) queue_work(nvme_reset_wq, &ctrl->ioerr_work); } @@ -2389,17 +2359,11 @@ nvme_fc_ctrl_free(struct kref *ref) container_of(ref, struct nvme_fc_ctrl, ref); unsigned long flags; - if (ctrl->ctrl.tagset) - nvme_remove_io_tag_set(&ctrl->ctrl); - /* remove from rport list */ spin_lock_irqsave(&ctrl->rport->lock, flags); list_del(&ctrl->ctrl_list); spin_unlock_irqrestore(&ctrl->rport->lock, flags); - nvme_unquiesce_admin_queue(&ctrl->ctrl); - nvme_remove_admin_tag_set(&ctrl->ctrl); - kfree(ctrl->queues); put_device(ctrl->dev); @@ -2463,7 +2427,7 @@ static bool nvme_fc_terminate_exchange(struct request *req, void *data) /* * This routine runs through all outstanding commands on the association - * and aborts them. This routine is typically be called by the + * and aborts them. This routine is typically called by the * delete_association routine. It is also called due to an error during * reconnect. 
In that scenario, it is most likely a command that initializes * the controller, including fabric Connect commands on io queues, that @@ -2513,7 +2477,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) * writing the registers for shutdown and polling (call * nvme_disable_ctrl()). Given a bunch of i/o was potentially * just aborted and we will wait on those contexts, and given - * there was no indication of how live the controlelr is on the + * there was no indication of how live the controller is on the * link, don't send more io to create more contexts for the * shutdown. Let the controller fail via keepalive failure if * its still present. @@ -2534,6 +2498,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) { + enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); + /* * if an error (io timeout, etc) while (re)connecting, the remote * port requested terminating of the association (disconnect_ls) @@ -2541,9 +2507,8 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) * the controller. Abort any ios on the association and let the * create_association error path resolve things. */ - if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { + if (state == NVME_CTRL_CONNECTING) { __nvme_fc_abort_outstanding_ios(ctrl, true); - set_bit(ASSOC_FAILED, &ctrl->flags); dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: transport error during (re)connect\n", ctrl->cnum); @@ -2551,7 +2516,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) } /* Otherwise, only proceed if in LIVE state - e.g. on first error */ - if (ctrl->ctrl.state != NVME_CTRL_LIVE) + if (state != NVME_CTRL_LIVE) return; dev_warn(ctrl->ctrl.device, @@ -2611,7 +2576,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, if (ret) return -ENOMEM; - op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); + op->nents = blk_rq_map_sg(rq, freq->sg_table.sgl); WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, rq_dma_dir(rq)); @@ -2655,7 +2620,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, * as part of the exchange. The CQE is the last thing for the io, * which is transferred (explicitly or implicitly) with the RSP IU * sent on the exchange. After the CQE is received, the FC exchange is - * terminaed and the Exchange may be used on a different io. + * terminated and the Exchange may be used on a different io. * * The transport to LLDD api has the transport making a request for a * new fcp io request to the LLDD. The LLDD then allocates a FC exchange @@ -2810,7 +2775,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx, * as WRITE ZEROES will return a non-zero rq payload_bytes yet * there is no actual payload to be transferred. * To get it right, key data transmission on there being 1 or - * more physical segments in the sg list. If there is no + * more physical segments in the sg list. If there are no * physical segments, there is no payload. 
*/ if (blk_rq_nr_phys_segments(rq)) { @@ -2898,7 +2863,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) unsigned int nr_io_queues; int ret; - nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), + nr_io_queues = min3(opts->nr_io_queues, num_online_cpus(), ctrl->lport->ops->max_hw_queues); ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) { @@ -2952,7 +2917,7 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) unsigned int nr_io_queues; int ret; - nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), + nr_io_queues = min3(opts->nr_io_queues, num_online_cpus(), ctrl->lport->ops->max_hw_queues); ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) { @@ -3062,15 +3027,20 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) struct nvmefc_ls_rcv_op *disls = NULL; unsigned long flags; int ret; - bool changed; ++ctrl->ctrl.nr_reconnects; - if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) + spin_lock_irqsave(&ctrl->rport->lock, flags); + if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { + spin_unlock_irqrestore(&ctrl->rport->lock, flags); return -ENODEV; + } - if (nvme_fc_ctlr_active_on_rport(ctrl)) + if (nvme_fc_ctlr_active_on_rport(ctrl)) { + spin_unlock_irqrestore(&ctrl->rport->lock, flags); return -ENOTUNIQ; + } + spin_unlock_irqrestore(&ctrl->rport->lock, flags); dev_info(ctrl->ctrl.device, "NVME-FC{%d}: create association : host wwpn 0x%016llx " @@ -3132,7 +3102,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ctrl->ctrl.icdoff) { dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", ctrl->ctrl.icdoff); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3140,7 +3110,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (!nvme_ctrl_sgl_supported(&ctrl->ctrl)) { dev_err(ctrl->ctrl.device, "Mandatory sgls are not supported!\n"); - ret = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + ret = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out_stop_keep_alive; } @@ -3173,12 +3143,13 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) if (ret) goto out_term_aen_ops; - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) { + ret = -EIO; + goto out_term_aen_ops; + } ctrl->ctrl.nr_reconnects = 0; - - if (changed) - nvme_start_ctrl(&ctrl->ctrl); + nvme_start_ctrl(&ctrl->ctrl); return 0; /* Success */ @@ -3286,13 +3257,20 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) { struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); - cancel_work_sync(&ctrl->ioerr_work); cancel_delayed_work_sync(&ctrl->connect_work); + /* * kill the association on the link side. 
this will block * waiting for io to terminate */ nvme_fc_delete_association(ctrl); + cancel_work_sync(&ctrl->ioerr_work); + + if (ctrl->ctrl.tagset) + nvme_remove_io_tag_set(&ctrl->ctrl); + + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvme_remove_admin_tag_set(&ctrl->ctrl); } static void @@ -3325,7 +3303,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); } else { if (portptr->port_state == FC_OBJSTATE_ONLINE) { - if (status > 0 && (status & NVME_SC_DNR)) + if (status > 0 && (status & NVME_STATUS_DNR)) dev_warn(ctrl->ctrl.device, "NVME-FC{%d}: reconnect failure\n", ctrl->cnum); @@ -3382,10 +3360,12 @@ static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_fc_free_ctrl, .submit_async_event = nvme_fc_submit_async_event, .delete_ctrl = nvme_fc_delete_ctrl, .get_address = nvmf_get_address, + .get_virt_boundary = nvmf_get_virt_boundary, }; static void @@ -3444,12 +3424,11 @@ nvme_fc_existing_controller(struct nvme_fc_rport *rport, return found; } -static struct nvme_ctrl * -nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, +static struct nvme_fc_ctrl * +nvme_fc_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) { struct nvme_fc_ctrl *ctrl; - unsigned long flags; int ret, idx, ctrl_loss_tmo; if (!(rport->remoteport.port_role & @@ -3538,7 +3517,35 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, if (lport->dev) ctrl->ctrl.numa_node = dev_to_node(lport->dev); - /* at this point, teardown path changes to ref counting on nvme ctrl */ + return ctrl; + +out_free_queues: + kfree(ctrl->queues); +out_free_ida: + put_device(ctrl->dev); + ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); +out_free_ctrl: + kfree(ctrl); +out_fail: + /* exit via here doesn't follow ctlr ref points */ + return ERR_PTR(ret); +} + +static struct nvme_ctrl * +nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, + struct nvme_fc_lport *lport, struct nvme_fc_rport *rport) +{ + struct nvme_fc_ctrl *ctrl; + unsigned long flags; + int ret; + + ctrl = nvme_fc_alloc_ctrl(dev, opts, lport, rport); + if (IS_ERR(ctrl)) + return ERR_CAST(ctrl); + + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto out_put_ctrl; ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, &nvme_fc_admin_mq_ops, @@ -3551,8 +3558,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); spin_unlock_irqrestore(&rport->lock, flags); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || - !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); goto fail_ctrl; @@ -3584,6 +3590,7 @@ fail_ctrl: /* initiate nvme ctrl ref counting teardown */ nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: /* Remove core ctrl ref. 
*/ nvme_put_ctrl(&ctrl->ctrl); @@ -3597,20 +3604,8 @@ fail_ctrl: nvme_fc_rport_get(rport); return ERR_PTR(-EIO); - -out_free_queues: - kfree(ctrl->queues); -out_free_ida: - put_device(ctrl->dev); - ida_free(&nvme_fc_ctrl_cnt, ctrl->cnum); -out_free_ctrl: - kfree(ctrl); -out_fail: - /* exit via here doesn't follow ctlr ref points */ - return ERR_PTR(ret); } - struct nvmet_fc_traddr { u64 nn; u64 pn; diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 8df73a0b3980..89a1a1043d63 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -6,7 +6,7 @@ #include <linux/hwmon.h> #include <linux/units.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "nvme.h" diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 8b69427a4476..a9c097dacad6 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -3,6 +3,7 @@ * Copyright (c) 2011-2014, Intel Corporation. * Copyright (c) 2017-2021 Christoph Hellwig. */ +#include <linux/blk-integrity.h> #include <linux/ptrace.h> /* for force_successful_syscall_return */ #include <linux/nvme_ioctl.h> #include <linux/io_uring/cmd.h> @@ -111,68 +112,56 @@ static struct request *nvme_alloc_user_request(struct request_queue *q, return req; } -static void nvme_unmap_bio(struct bio *bio) -{ - if (bio_integrity(bio)) - bio_integrity_unmap_free_user(bio); - blk_rq_unmap_user(bio); -} - static int nvme_map_user_request(struct request *req, u64 ubuffer, unsigned bufflen, void __user *meta_buffer, unsigned meta_len, - u32 meta_seed, struct io_uring_cmd *ioucmd, unsigned int flags) + struct iov_iter *iter, unsigned int flags) { struct request_queue *q = req->q; struct nvme_ns *ns = q->queuedata; struct block_device *bdev = ns ? ns->disk->part0 : NULL; + bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk); + struct nvme_ctrl *ctrl = nvme_req(req)->ctrl; + bool has_metadata = meta_buffer && meta_len; struct bio *bio = NULL; int ret; - if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { - struct iov_iter iter; - - /* fixedbufs is only for non-vectored io */ - if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) + if (!nvme_ctrl_sgl_supported(ctrl)) + dev_warn_once(ctrl->device, "using unchecked data buffer\n"); + if (has_metadata) { + if (!supports_metadata) return -EINVAL; - ret = io_uring_cmd_import_fixed(ubuffer, bufflen, - rq_data_dir(req), &iter, ioucmd); - if (ret < 0) - goto out; - ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL); - } else { + + if (!nvme_ctrl_meta_sgl_supported(ctrl)) + dev_warn_once(ctrl->device, + "using unchecked metadata buffer\n"); + } + + if (iter) + ret = blk_rq_map_user_iov(q, req, NULL, iter, GFP_KERNEL); + else ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer), bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0, 0, rq_data_dir(req)); - } - if (ret) - goto out; + return ret; - bio = req->bio; - if (bdev) { - bio_set_dev(bio, bdev); - if (meta_buffer && meta_len) { - ret = bio_integrity_map_user(bio, meta_buffer, meta_len, - meta_seed); - if (ret) - goto out_unmap; - req->cmd_flags |= REQ_INTEGRITY; - } + if (has_metadata) { + ret = blk_rq_integrity_map_user(req, meta_buffer, meta_len); + if (ret) + goto out_unmap; } return ret; out_unmap: if (bio) - nvme_unmap_bio(bio); -out: - blk_mq_free_request(req); + blk_rq_unmap_user(bio); return ret; } static int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd, u64 ubuffer, unsigned bufflen, - void __user *meta_buffer, unsigned meta_len, u32 meta_seed, + void __user 
*meta_buffer, unsigned meta_len, u64 *result, unsigned timeout, unsigned int flags) { struct nvme_ns *ns = q->queuedata; @@ -189,9 +178,9 @@ static int nvme_submit_user_cmd(struct request_queue *q, req->timeout = timeout; if (ubuffer && bufflen) { ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer, - meta_len, meta_seed, NULL, flags); + meta_len, NULL, flags); if (ret) - return ret; + goto out_free_req; } bio = req->bio; @@ -202,12 +191,15 @@ static int nvme_submit_user_cmd(struct request_queue *q, if (result) *result = le64_to_cpu(nvme_req(req)->result.u64); if (bio) - nvme_unmap_bio(bio); + blk_rq_unmap_user(bio); blk_mq_free_request(req); if (effects) nvme_passthru_end(ctrl, ns, effects, cmd, ret); + return ret; +out_free_req: + blk_mq_free_request(req); return ret; } @@ -266,11 +258,11 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.control = cpu_to_le16(io.control); c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); c.rw.reftag = cpu_to_le32(io.reftag); - c.rw.apptag = cpu_to_le16(io.apptag); - c.rw.appmask = cpu_to_le16(io.appmask); + c.rw.lbat = cpu_to_le16(io.apptag); + c.rw.lbatm = cpu_to_le16(io.appmask); return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata, - meta_len, lower_32_bits(io.slba), NULL, 0, 0); + meta_len, NULL, 0, 0); } static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, @@ -278,8 +270,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, { if (ns && nsid != ns->head->ns_id) { dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u)" - "of namespace\n", + "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", current->comm, nsid, ns->head->ns_id); return false; } @@ -325,7 +316,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c, cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), - cmd.metadata_len, 0, &result, timeout, 0); + cmd.metadata_len, &result, timeout, 0); if (status >= 0) { if (put_user(result, &ucmd->result)) @@ -372,7 +363,7 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns, status = nvme_submit_user_cmd(ns ? 
ns->queue : ctrl->admin_q, &c, cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata), - cmd.metadata_len, 0, &cmd.result, timeout, flags); + cmd.metadata_len, &cmd.result, timeout, flags); if (status >= 0) { if (put_user(cmd.result, &ucmd->result)) @@ -404,17 +395,18 @@ struct nvme_uring_cmd_pdu { static inline struct nvme_uring_cmd_pdu *nvme_uring_cmd_pdu( struct io_uring_cmd *ioucmd) { - return (struct nvme_uring_cmd_pdu *)&ioucmd->pdu; + return io_uring_cmd_to_pdu(ioucmd, struct nvme_uring_cmd_pdu); } -static void nvme_uring_task_cb(struct io_uring_cmd *ioucmd, - unsigned issue_flags) +static void nvme_uring_task_cb(struct io_tw_req tw_req, io_tw_token_t tw) { + struct io_uring_cmd *ioucmd = io_uring_cmd_from_tw(tw_req); struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); if (pdu->bio) - nvme_unmap_bio(pdu->bio); - io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags); + blk_rq_unmap_user(pdu->bio); + io_uring_cmd_done32(ioucmd, pdu->status, pdu->result, + IO_URING_CMD_TASK_WORK_ISSUE_FLAGS); } static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, @@ -423,28 +415,24 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, struct io_uring_cmd *ioucmd = req->end_io_data; struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd); - if (nvme_req(req)->flags & NVME_REQ_CANCELLED) + if (nvme_req(req)->flags & NVME_REQ_CANCELLED) { pdu->status = -EINTR; - else + } else { pdu->status = nvme_req(req)->status; + if (!pdu->status) + pdu->status = blk_status_to_errno(err); + } pdu->result = le64_to_cpu(nvme_req(req)->result.u64); /* - * For iopoll, complete it directly. Note that using the uring_cmd - * helper for this is safe only because we check blk_rq_is_poll(). - * As that returns false if we're NOT on a polled queue, then it's - * safe to use the polled completion helper. - * - * Otherwise, move the completion to task work. + * IOPOLL could potentially complete this request directly, but + * if multiple rings are polling on the same queue, then it's possible + * for one ring to find completions for another ring. Punting the + * completion via task_work will always direct it to the right + * location, rather than potentially complete requests for ringA + * under iopoll invocations from ringB. */ - if (blk_rq_is_poll(req)) { - if (pdu->bio) - nvme_unmap_bio(pdu->bio); - io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status); - } else { - io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); - } - + io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); return RQ_END_IO_FREE; } @@ -456,8 +444,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, struct request_queue *q = ns ? ns->queue : ctrl->admin_q; struct nvme_uring_data d; struct nvme_command c; + struct iov_iter iter; + struct iov_iter *map_iter = NULL; struct request *req; - blk_opf_t rq_flags = REQ_ALLOC_CACHE; + blk_opf_t rq_flags = 0; blk_mq_req_flags_t blk_flags = 0; int ret; @@ -491,6 +481,22 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, d.metadata_len = READ_ONCE(cmd->metadata_len); d.timeout_ms = READ_ONCE(cmd->timeout_ms); + if (d.data_len && (ioucmd->flags & IORING_URING_CMD_FIXED)) { + int ddir = nvme_is_write(&c) ? 
WRITE : READ; + + if (vec) + ret = io_uring_cmd_import_fixed_vec(ioucmd, + u64_to_user_ptr(d.addr), d.data_len, + ddir, &iter, issue_flags); + else + ret = io_uring_cmd_import_fixed(d.addr, d.data_len, + ddir, &iter, ioucmd, issue_flags); + if (ret < 0) + return ret; + + map_iter = &iter; + } + if (issue_flags & IO_URING_F_NONBLOCK) { rq_flags |= REQ_NOWAIT; blk_flags = BLK_MQ_REQ_NOWAIT; @@ -503,12 +509,12 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return PTR_ERR(req); req->timeout = d.timeout_ms ? msecs_to_jiffies(d.timeout_ms) : 0; - if (d.addr && d.data_len) { - ret = nvme_map_user_request(req, d.addr, - d.data_len, nvme_to_user_ptr(d.metadata), - d.metadata_len, 0, ioucmd, vec); + if (d.data_len) { + ret = nvme_map_user_request(req, d.addr, d.data_len, + nvme_to_user_ptr(d.metadata), d.metadata_len, + map_iter, vec ? NVME_IOCTL_VEC : 0); if (ret) - return ret; + goto out_free_req; } /* to free bio on completion, as req->bio will be null at that time */ @@ -518,6 +524,10 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns, req->end_io = nvme_uring_cmd_end_io; blk_execute_rq_nowait(req, false); return -EIOCBQUEUED; + +out_free_req: + blk_mq_free_request(req); + return ret; } static bool is_ctrl_ioctl(unsigned int cmd) @@ -634,8 +644,6 @@ static int nvme_ns_uring_cmd(struct nvme_ns *ns, struct io_uring_cmd *ioucmd, struct nvme_ctrl *ctrl = ns->ctrl; int ret; - BUILD_BUG_ON(sizeof(struct nvme_uring_cmd_pdu) > sizeof(ioucmd->pdu)); - ret = nvme_uring_cmd_checks(issue_flags); if (ret) return ret; @@ -710,7 +718,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode, /* * Handle ioctls that apply to the controller instead of the namespace - * seperately and drop the ns SRCU reference early. This avoids a + * separately and drop the ns SRCU reference early. This avoids a * deadlock when deleting namespaces using the passthrough interface. 
*/ if (is_ctrl_ioctl(cmd)) diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index d8b6b4648eaf..174027d1cc19 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -10,13 +10,65 @@ #include "nvme.h" bool multipath = true; -module_param(multipath, bool, 0444); +static bool multipath_always_on; + +static int multipath_param_set(const char *val, const struct kernel_param *kp) +{ + int ret; + bool *arg = kp->arg; + + ret = param_set_bool(val, kp); + if (ret) + return ret; + + if (multipath_always_on && !*arg) { + pr_err("Can't disable multipath when multipath_always_on is configured.\n"); + *arg = true; + return -EINVAL; + } + + return 0; +} + +static const struct kernel_param_ops multipath_param_ops = { + .set = multipath_param_set, + .get = param_get_bool, +}; + +module_param_cb(multipath, &multipath_param_ops, &multipath, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); +static int multipath_always_on_set(const char *val, + const struct kernel_param *kp) +{ + int ret; + bool *arg = kp->arg; + + ret = param_set_bool(val, kp); + if (ret < 0) + return ret; + + if (*arg) + multipath = true; + + return 0; +} + +static const struct kernel_param_ops multipath_always_on_ops = { + .set = multipath_always_on_set, + .get = param_get_bool, +}; + +module_param_cb(multipath_always_on, &multipath_always_on_ops, + &multipath_always_on, 0444); +MODULE_PARM_DESC(multipath_always_on, + "create multipath node always except for private namespace with non-unique nsid; note that this also implicitly enables native multipath support"); + static const char *nvme_iopolicy_names[] = { [NVME_IOPOLICY_NUMA] = "numa", [NVME_IOPOLICY_RR] = "round-robin", + [NVME_IOPOLICY_QD] = "queue-depth", }; static int iopolicy = NVME_IOPOLICY_NUMA; @@ -29,6 +81,8 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp) iopolicy = NVME_IOPOLICY_NUMA; else if (!strncmp(val, "round-robin", 11)) iopolicy = NVME_IOPOLICY_RR; + else if (!strncmp(val, "queue-depth", 11)) + iopolicy = NVME_IOPOLICY_QD; else return -EINVAL; @@ -43,7 +97,7 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp) module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy, &iopolicy, 0644); MODULE_PARM_DESC(iopolicy, - "Default multipath I/O policy; 'numa' (default) or 'round-robin'"); + "Default multipath I/O policy; 'numa' (default), 'round-robin' or 'queue-depth'"); void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys) { @@ -57,7 +111,7 @@ void nvme_mpath_unfreeze(struct nvme_subsystem *subsys) lockdep_assert_held(&subsys->lock); list_for_each_entry(h, &subsys->nsheads, entry) if (h->disk) - blk_mq_unfreeze_queue(h->disk->queue); + blk_mq_unfreeze_queue_nomemrestore(h->disk->queue); } void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys) @@ -83,7 +137,7 @@ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys) void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; - u16 status = nvme_req(req)->status & 0x7ff; + u16 status = nvme_req(req)->status & NVME_SCT_SC_MASK; unsigned long flags; struct bio *bio; @@ -128,7 +182,14 @@ void nvme_mpath_start_request(struct request *rq) struct nvme_ns *ns = rq->q->queuedata; struct gendisk *disk = ns->head->disk; - if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq)) + if ((READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_QD) && + !(nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE)) { + 
atomic_inc(&ns->ctrl->nr_active); + nvme_req(rq)->flags |= NVME_MPATH_CNT_ACTIVE; + } + + if (!blk_queue_io_stat(disk->queue) || blk_rq_is_passthrough(rq) || + (nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; @@ -141,6 +202,9 @@ void nvme_mpath_end_request(struct request *rq) { struct nvme_ns *ns = rq->q->queuedata; + if (nvme_req(rq)->flags & NVME_MPATH_CNT_ACTIVE) + atomic_dec_if_positive(&ns->ctrl->nr_active); + if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; bdev_end_io_acct(ns->head->disk->part0, req_op(rq), @@ -154,7 +218,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { if (!ns->head->disk) continue; kblockd_schedule_work(&ns->head->requeue_work); @@ -198,7 +263,8 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) int srcu_idx; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { nvme_mpath_clear_current_path(ns); kblockd_schedule_work(&ns->head->requeue_work); } @@ -213,7 +279,8 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns) int srcu_idx; srcu_idx = srcu_read_lock(&head->srcu); - list_for_each_entry_rcu(ns, &head->list, siblings) { + list_for_each_entry_srcu(ns, &head->list, siblings, + srcu_read_lock_held(&head->srcu)) { if (capacity != get_capacity(ns->disk)) clear_bit(NVME_NS_READY, &ns->flags); } @@ -246,7 +313,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) int found_distance = INT_MAX, fallback_distance = INT_MAX, distance; struct nvme_ns *found = NULL, *fallback = NULL, *ns; - list_for_each_entry_rcu(ns, &head->list, siblings) { + list_for_each_entry_srcu(ns, &head->list, siblings, + srcu_read_lock_held(&head->srcu)) { if (nvme_path_is_disabled(ns)) continue; @@ -291,10 +359,15 @@ static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); } -static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, - int node, struct nvme_ns *old) +static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head) { struct nvme_ns *ns, *found = NULL; + int node = numa_node_id(); + struct nvme_ns *old = srcu_dereference(head->current_path[node], + &head->srcu); + + if (unlikely(!old)) + return __nvme_find_path(head, node); if (list_is_singular(&head->list)) { if (nvme_path_is_disabled(old)) @@ -334,13 +407,50 @@ out: return found; } +static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head) +{ + struct nvme_ns *best_opt = NULL, *best_nonopt = NULL, *ns; + unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX; + unsigned int depth; + + list_for_each_entry_srcu(ns, &head->list, siblings, + srcu_read_lock_held(&head->srcu)) { + if (nvme_path_is_disabled(ns)) + continue; + + depth = atomic_read(&ns->ctrl->nr_active); + + switch (ns->ana_state) { + case NVME_ANA_OPTIMIZED: + if (depth < min_depth_opt) { + min_depth_opt = depth; + best_opt = ns; + } + break; + case NVME_ANA_NONOPTIMIZED: + if (depth < min_depth_nonopt) { + min_depth_nonopt = depth; + best_nonopt = ns; + } + break; + default: + break; + } + + if (min_depth_opt == 0) + return best_opt; + } + + return best_opt ? 
best_opt : best_nonopt; +} + static inline bool nvme_path_is_optimized(struct nvme_ns *ns) { return nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE && ns->ana_state == NVME_ANA_OPTIMIZED; } -inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) +static struct nvme_ns *nvme_numa_path(struct nvme_ns_head *head) { int node = numa_node_id(); struct nvme_ns *ns; @@ -348,32 +458,54 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) ns = srcu_dereference(head->current_path[node], &head->srcu); if (unlikely(!ns)) return __nvme_find_path(head, node); - - if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) - return nvme_round_robin_path(head, node, ns); if (unlikely(!nvme_path_is_optimized(ns))) return __nvme_find_path(head, node); return ns; } +inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) +{ + switch (READ_ONCE(head->subsys->iopolicy)) { + case NVME_IOPOLICY_QD: + return nvme_queue_depth_path(head); + case NVME_IOPOLICY_RR: + return nvme_round_robin_path(head); + default: + return nvme_numa_path(head); + } +} + static bool nvme_available_path(struct nvme_ns_head *head) { struct nvme_ns *ns; - list_for_each_entry_rcu(ns, &head->list, siblings) { + if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) + return false; + + list_for_each_entry_srcu(ns, &head->list, siblings, + srcu_read_lock_held(&head->srcu)) { if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) continue; switch (nvme_ctrl_state(ns->ctrl)) { case NVME_CTRL_LIVE: case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: - /* fallthru */ return true; default: break; } } - return false; + + /* + * If "head->delayed_removal_secs" is configured (i.e., non-zero), do + * not immediately fail I/O. Instead, requeue the I/O for the configured + * duration, anticipating that if there's a transient link failure then + * it may recover within this time window. This parameter is exported to + * userspace via sysfs, and its default value is zero. It is internally + * mapped to NVME_NSHEAD_QUEUE_IF_NO_PATH. When delayed_removal_secs is + * non-zero, this flag is set to true. When zero, the flag is cleared. 
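/*
 * Editor's note (not part of the patch): a small userspace sketch showing how
 * the "queue-depth" iopolicy and the delayed_removal_secs attribute described
 * above might be configured through sysfs. The subsystem and namespace names
 * are hypothetical and the exact sysfs locations are assumptions; check the
 * running kernel's sysfs layout before relying on them.
 */
#include <stdio.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	/* select queue-depth based path selection for one subsystem */
	write_sysfs("/sys/class/nvme-subsystem/nvme-subsys0/iopolicy",
		    "queue-depth");
	/* keep I/O queued for up to 60s after the last path disappears */
	write_sysfs("/sys/block/nvme0n1/delayed_removal_secs", "60");
	return 0;
}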
+ */ + return nvme_mpath_queue_if_no_path(head); } static void nvme_ns_head_submit_bio(struct bio *bio) @@ -427,9 +559,24 @@ static void nvme_ns_head_release(struct gendisk *disk) nvme_put_ns_head(disk->private_data); } +static int nvme_ns_head_get_unique_id(struct gendisk *disk, u8 id[16], + enum blk_unique_id type) +{ + struct nvme_ns_head *head = disk->private_data; + struct nvme_ns *ns; + int srcu_idx, ret = -EWOULDBLOCK; + + srcu_idx = srcu_read_lock(&head->srcu); + ns = nvme_find_path(head); + if (ns) + ret = nvme_ns_get_unique_id(ns, id, type); + srcu_read_unlock(&head->srcu, srcu_idx); + return ret; +} + #ifdef CONFIG_BLK_DEV_ZONED static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data) + unsigned int nr_zones, struct blk_report_zones_args *args) { struct nvme_ns_head *head = disk->private_data; struct nvme_ns *ns; @@ -438,7 +585,7 @@ static int nvme_ns_head_report_zones(struct gendisk *disk, sector_t sector, srcu_idx = srcu_read_lock(&head->srcu); ns = nvme_find_path(head); if (ns) - ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); + ret = nvme_ns_report_zones(ns, sector, nr_zones, args); srcu_read_unlock(&head->srcu, srcu_idx); return ret; } @@ -454,6 +601,7 @@ const struct block_device_operations nvme_ns_head_ops = { .ioctl = nvme_ns_head_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, .getgeo = nvme_getgeo, + .get_unique_id = nvme_ns_head_get_unique_id, .report_zones = nvme_ns_head_report_zones, .pr_ops = &nvme_pr_ops, }; @@ -500,6 +648,20 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) return ret; } +static void nvme_partition_scan_work(struct work_struct *work) +{ + struct nvme_ns_head *head = + container_of(work, struct nvme_ns_head, partition_scan_work); + + if (WARN_ON_ONCE(!test_and_clear_bit(GD_SUPPRESS_PART_SCAN, + &head->disk->state))) + return; + + mutex_lock(&head->disk->open_mutex); + bdev_disk_changed(head->disk, false); + mutex_unlock(&head->disk->open_mutex); +} + static void nvme_requeue_work(struct work_struct *work) { struct nvme_ns_head *head = @@ -518,55 +680,95 @@ static void nvme_requeue_work(struct work_struct *work) } } +static void nvme_remove_head(struct nvme_ns_head *head) +{ + if (test_and_clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { + /* + * requeue I/O after NVME_NSHEAD_DISK_LIVE has been cleared + * to allow multipath to fail all I/O. 
+ */ + kblockd_schedule_work(&head->requeue_work); + + nvme_cdev_del(&head->cdev, &head->cdev_device); + synchronize_srcu(&head->srcu); + del_gendisk(head->disk); + } + nvme_put_ns_head(head); +} + +static void nvme_remove_head_work(struct work_struct *work) +{ + struct nvme_ns_head *head = container_of(to_delayed_work(work), + struct nvme_ns_head, remove_work); + bool remove = false; + + mutex_lock(&head->subsys->lock); + if (list_empty(&head->list)) { + list_del_init(&head->entry); + remove = true; + } + mutex_unlock(&head->subsys->lock); + if (remove) + nvme_remove_head(head); + + module_put(THIS_MODULE); +} + int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) { struct queue_limits lim; - bool vwc = false; mutex_init(&head->lock); bio_list_init(&head->requeue_list); spin_lock_init(&head->requeue_lock); INIT_WORK(&head->requeue_work, nvme_requeue_work); + INIT_WORK(&head->partition_scan_work, nvme_partition_scan_work); + INIT_DELAYED_WORK(&head->remove_work, nvme_remove_head_work); + head->delayed_removal_secs = 0; /* - * Add a multipath node if the subsystems supports multiple controllers. - * We also do this for private namespaces as the namespace sharing flag - * could change after a rescan. + * If "multipath_always_on" is enabled, a multipath node is added + * regardless of whether the disk is single/multi ported, and whether + * the namespace is shared or private. If "multipath_always_on" is not + * enabled, a multipath node is added only if the subsystem supports + * multiple controllers and the "multipath" option is configured. In + * either case, for private namespaces, we ensure that the NSID is + * unique. */ - if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || - !nvme_is_unique_nsid(ctrl, head) || !multipath) + if (!multipath_always_on) { + if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || - !multipath) + return 0; + } + + if (!nvme_is_unique_nsid(ctrl, head)) + return 0; blk_set_stacking_limits(&lim); lim.dma_alignment = 3; - if (head->ids.csi != NVME_CSI_ZNS) - lim.max_zone_append_sectors = 0; + lim.features |= BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT | + BLK_FEAT_POLL | BLK_FEAT_ATOMIC_WRITES; + if (head->ids.csi == NVME_CSI_ZNS) + lim.features |= BLK_FEAT_ZONED; head->disk = blk_alloc_disk(&lim, ctrl->numa_node); if (IS_ERR(head->disk)) return PTR_ERR(head->disk); head->disk->fops = &nvme_ns_head_ops; head->disk->private_data = head; - sprintf(head->disk->disk_name, "nvme%dn%d", - ctrl->subsys->instance, head->instance); - blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_IO_STAT, head->disk->queue); /* - * This assumes all controllers that refer to a namespace either - * support poll queues or not. That is not a strict guarantee, - * but if the assumption is wrong the effect is only suboptimal - * performance but not correctness problem. + * We need to suppress the partition scan from occurring within the + * controller's scan_work context. If a path error occurs here, the IO + * will wait until a path becomes available or all paths are torn down, + * but that action also occurs within scan_work, so it would deadlock. + * Defer the partition scan to a different context that does not block + * scan_work. 
+ */ - if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL && - ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues) - blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue); - - /* we need to propagate up the VMC settings */ - if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) - vwc = true; - blk_queue_write_cache(head->disk->queue, vwc, vwc); + set_bit(GD_SUPPRESS_PART_SCAN, &head->disk->state); + sprintf(head->disk->disk_name, "nvme%dn%d", + ctrl->subsys->instance, head->instance); + nvme_tryget_ns_head(head); return 0; } @@ -587,12 +789,15 @@ static void nvme_mpath_set_live(struct nvme_ns *ns) rc = device_add_disk(&head->subsys->dev, head->disk, nvme_ns_attr_groups); if (rc) { - clear_bit(NVME_NSHEAD_DISK_LIVE, &ns->flags); + clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); return; } nvme_add_ns_head_cdev(head); + queue_work(nvme_wq, &head->partition_scan_work); } + nvme_mpath_add_sysfs_link(ns->head); + mutex_lock(&head->lock); if (nvme_path_is_optimized(ns)) { int node, srcu_idx; @@ -675,6 +880,25 @@ static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, if (nvme_state_is_live(ns->ana_state) && nvme_ctrl_state(ns->ctrl) == NVME_CTRL_LIVE) nvme_mpath_set_live(ns); + else { + /* + * Add sysfs link from multipath head gendisk node to path + * device gendisk node. + * If the path's ana state is live (i.e. the state is either optimized + * or non-optimized) while we alloc the ns then the sysfs link would + * be created from nvme_mpath_set_live(). In that case we would + * not fall through to this code path. However for the path's ana + * state other than live, we call nvme_mpath_set_live() only + * after the ana state has transitioned to the live state. But we still + * want to create the sysfs link from head node to a path device + * irrespective of the path's ana state. + * If we reach here then it means that the path's ana state + * is not live but we still create the sysfs link to this path from + * the head node if the head node of the path has already come alive. 
+ */ + if (test_bit(NVME_NSHEAD_DISK_LIVE, &ns->head->flags)) + nvme_mpath_add_sysfs_link(ns->head); + } } static int nvme_update_ana_state(struct nvme_ctrl *ctrl, @@ -696,7 +920,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl, return 0; srcu_idx = srcu_read_lock(&ctrl->srcu); - list_for_each_entry_rcu(ns, &ctrl->namespaces, list) { + list_for_each_entry_srcu(ns, &ctrl->namespaces, list, + srcu_read_lock_held(&ctrl->srcu)) { unsigned nsid; again: nsid = le32_to_cpu(desc->nsids[n]); @@ -745,7 +970,7 @@ static int nvme_read_ana_log(struct nvme_ctrl *ctrl) if (nr_change_groups) mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies); else - del_timer_sync(&ctrl->anatt_timer); + timer_delete_sync(&ctrl->anatt_timer); out_unlock: mutex_unlock(&ctrl->ana_lock); return error; @@ -775,7 +1000,7 @@ void nvme_mpath_update(struct nvme_ctrl *ctrl) static void nvme_anatt_timeout(struct timer_list *t) { - struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer); + struct nvme_ctrl *ctrl = timer_container_of(ctrl, t, anatt_timer); dev_info(ctrl->device, "ANATT timeout, resetting controller.\n"); nvme_reset_ctrl(ctrl); @@ -785,7 +1010,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl) { if (!nvme_ctrl_use_ana(ctrl)) return; - del_timer_sync(&ctrl->anatt_timer); + timer_delete_sync(&ctrl->anatt_timer); cancel_work_sync(&ctrl->ana_work); } @@ -803,6 +1028,29 @@ static ssize_t nvme_subsys_iopolicy_show(struct device *dev, nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]); } +static void nvme_subsys_iopolicy_update(struct nvme_subsystem *subsys, + int iopolicy) +{ + struct nvme_ctrl *ctrl; + int old_iopolicy = READ_ONCE(subsys->iopolicy); + + if (old_iopolicy == iopolicy) + return; + + WRITE_ONCE(subsys->iopolicy, iopolicy); + + /* iopolicy changes clear the mpath by design */ + mutex_lock(&nvme_subsystems_lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) + nvme_mpath_clear_ctrl_paths(ctrl); + mutex_unlock(&nvme_subsystems_lock); + + pr_notice("subsysnqn %s iopolicy changed from %s to %s\n", + subsys->subnqn, + nvme_iopolicy_names[old_iopolicy], + nvme_iopolicy_names[iopolicy]); +} + static ssize_t nvme_subsys_iopolicy_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -812,7 +1060,7 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev, for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) { if (sysfs_streq(buf, nvme_iopolicy_names[i])) { - WRITE_ONCE(subsys->iopolicy, i); + nvme_subsys_iopolicy_update(subsys, i); return count; } } @@ -838,6 +1086,88 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, } DEVICE_ATTR_RO(ana_state); +static ssize_t queue_depth_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); + + if (ns->head->subsys->iopolicy != NVME_IOPOLICY_QD) + return 0; + + return sysfs_emit(buf, "%d\n", atomic_read(&ns->ctrl->nr_active)); +} +DEVICE_ATTR_RO(queue_depth); + +static ssize_t numa_nodes_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int node, srcu_idx; + nodemask_t numa_nodes; + struct nvme_ns *current_ns; + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); + struct nvme_ns_head *head = ns->head; + + if (head->subsys->iopolicy != NVME_IOPOLICY_NUMA) + return 0; + + nodes_clear(numa_nodes); + + srcu_idx = srcu_read_lock(&head->srcu); + for_each_node(node) { + current_ns = srcu_dereference(head->current_path[node], + &head->srcu); + if (ns == current_ns) + node_set(node, 
numa_nodes); + } + srcu_read_unlock(&head->srcu, srcu_idx); + + return sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&numa_nodes)); +} +DEVICE_ATTR_RO(numa_nodes); + +static ssize_t delayed_removal_secs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + struct nvme_ns_head *head = disk->private_data; + int ret; + + mutex_lock(&head->subsys->lock); + ret = sysfs_emit(buf, "%u\n", head->delayed_removal_secs); + mutex_unlock(&head->subsys->lock); + return ret; +} + +static ssize_t delayed_removal_secs_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct gendisk *disk = dev_to_disk(dev); + struct nvme_ns_head *head = disk->private_data; + unsigned int sec; + int ret; + + ret = kstrtouint(buf, 0, &sec); + if (ret < 0) + return ret; + + mutex_lock(&head->subsys->lock); + head->delayed_removal_secs = sec; + if (sec) + set_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); + else + clear_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags); + mutex_unlock(&head->subsys->lock); + /* + * Ensure that the update to NVME_NSHEAD_QUEUE_IF_NO_PATH is seen + * by its readers. + */ + synchronize_srcu(&head->srcu); + + return count; +} + +DEVICE_ATTR_RW(delayed_removal_secs); + static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *desc, void *data) { @@ -850,6 +1180,85 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl, return -ENXIO; /* just break out of the loop */ } +void nvme_mpath_add_sysfs_link(struct nvme_ns_head *head) +{ + struct device *target; + int rc, srcu_idx; + struct nvme_ns *ns; + struct kobject *kobj; + + /* + * Ensure the head disk node is already added, otherwise we may get an invalid + * kobj for the head disk node + */ + if (!test_bit(GD_ADDED, &head->disk->state)) + return; + + kobj = &disk_to_dev(head->disk)->kobj; + + /* + * Loop through each ns chained through head->list and create the + * sysfs link from the head node to the ns path node + */ + srcu_idx = srcu_read_lock(&head->srcu); + + list_for_each_entry_srcu(ns, &head->list, siblings, + srcu_read_lock_held(&head->srcu)) { + /* + * Ensure that the ns path disk node is already added, otherwise we + * may get an invalid kobj name for the target + */ + if (!test_bit(GD_ADDED, &ns->disk->state)) + continue; + + /* + * Avoid creating the link if it already exists for the given path. + * When the path's ana state transitions from optimized to non- + * optimized or vice-versa, nvme_mpath_set_live() is + * invoked, which in turn calls this function. Now if the sysfs + * link already exists for the given path and we attempt to re- + * create it, the sysfs code would warn about it loudly. + * So we evaluate the NVME_NS_SYSFS_ATTR_LINK flag here to ensure + * that we're not creating a duplicate link. + * The test_and_set_bit() is used because it protects + * against multiple nvme paths being added simultaneously. + */ + if (test_and_set_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags)) + continue; + + target = disk_to_dev(ns->disk); + /* + * Create the sysfs link from the head gendisk kobject @kobj to the + * ns path gendisk kobject @target->kobj. 
+ */ + rc = sysfs_add_link_to_group(kobj, nvme_ns_mpath_attr_group.name, + &target->kobj, dev_name(target)); + if (unlikely(rc)) { + dev_err(disk_to_dev(ns->head->disk), + "failed to create link to %s\n", + dev_name(target)); + clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags); + } + } + + srcu_read_unlock(&head->srcu, srcu_idx); +} + +void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns) +{ + struct device *target; + struct kobject *kobj; + + if (!test_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags)) + return; + + target = disk_to_dev(ns->disk); + kobj = &disk_to_dev(ns->head->disk)->kobj; + sysfs_remove_link_from_group(kobj, nvme_ns_mpath_attr_group.name, + dev_name(target)); + clear_bit(NVME_NS_SYSFS_ATTR_LINK, &ns->flags); +} + void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) { if (nvme_ctrl_use_ana(ns->ctrl)) { @@ -875,33 +1284,59 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) nvme_mpath_set_live(ns); } - if (blk_queue_stable_writes(ns->queue) && ns->head->disk) - blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, - ns->head->disk->queue); #ifdef CONFIG_BLK_DEV_ZONED if (blk_queue_is_zoned(ns->queue) && ns->head->disk) ns->head->disk->nr_zones = ns->disk->nr_zones; #endif } -void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +void nvme_mpath_remove_disk(struct nvme_ns_head *head) { + bool remove = false; + if (!head->disk) return; - kblockd_schedule_work(&head->requeue_work); - if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { - nvme_cdev_del(&head->cdev, &head->cdev_device); - del_gendisk(head->disk); + + mutex_lock(&head->subsys->lock); + /* + * We are called when all paths have been removed, and at that point + * head->list is expected to be empty. However, nvme_remove_ns() and + * nvme_init_ns_head() can run concurrently and so if head->delayed_ + * removal_secs is configured, it is possible that by the time we reach + * this point, head->list may no longer be empty. Therefore, we recheck + * head->list here. If it is no longer empty then we skip enqueuing the + * delayed head removal work. + */ + if (!list_empty(&head->list)) + goto out; + + if (head->delayed_removal_secs) { + /* + * Ensure that no one could remove this module while the head + * remove work is pending. 
+ */ + if (!try_module_get(THIS_MODULE)) + goto out; + mod_delayed_work(nvme_wq, &head->remove_work, + head->delayed_removal_secs * HZ); + } else { + list_del_init(&head->entry); + remove = true; } +out: + mutex_unlock(&head->subsys->lock); + if (remove) + nvme_remove_head(head); } -void nvme_mpath_remove_disk(struct nvme_ns_head *head) +void nvme_mpath_put_disk(struct nvme_ns_head *head) { if (!head->disk) return; /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); flush_work(&head->requeue_work); + flush_work(&head->partition_scan_work); put_disk(head->disk); } @@ -923,6 +1358,9 @@ int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)) return 0; + /* initialize this in the identify path to cover controller resets */ + atomic_set(&ctrl->nr_active, 0); + if (!ctrl->max_namespaces || ctrl->max_namespaces > le32_to_cpu(id->nn)) { dev_err(ctrl->device, diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 68b400f9c42d..9a5f28c5103c 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -49,6 +49,7 @@ extern unsigned int admin_timeout; extern struct workqueue_struct *nvme_wq; extern struct workqueue_struct *nvme_reset_wq; extern struct workqueue_struct *nvme_delete_wq; +extern struct mutex nvme_subsystems_lock; /* * List of workarounds for devices that required behavior not specified in @@ -68,7 +69,7 @@ enum nvme_quirks { NVME_QUIRK_IDENTIFY_CNS = (1 << 1), /* - * The controller deterministically returns O's on reads to + * The controller deterministically returns 0's on reads to * logical blocks that deallocate was called on. */ NVME_QUIRK_DEALLOCATE_ZEROES = (1 << 2), @@ -90,6 +91,11 @@ enum nvme_quirks { NVME_QUIRK_NO_DEEPEST_PS = (1 << 5), /* + * Problems seen with concurrent commands + */ + NVME_QUIRK_QDEPTH_ONE = (1 << 6), + + /* * Set MEDIUM priority on SQ creation */ NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7), @@ -167,6 +173,11 @@ enum nvme_quirks { * MSI (but not MSI-X) interrupts are broken and never fire. */ NVME_QUIRK_BROKEN_MSI = (1 << 21), + + /* + * Align dma pool segment size to 512 bytes + */ + NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22), }; /* @@ -195,6 +206,7 @@ enum { NVME_REQ_CANCELLED = (1 << 0), NVME_REQ_USERCMD = (1 << 1), NVME_MPATH_IO_STATS = (1 << 2), + NVME_MPATH_CNT_ACTIVE = (1 << 3), }; static inline struct nvme_request *nvme_req(struct request *req) @@ -299,7 +311,6 @@ struct nvme_ctrl { struct opal_dev *opal_dev; - char name[12]; u16 cntlid; u16 mtfa; @@ -360,6 +371,7 @@ struct nvme_ctrl { size_t ana_log_size; struct timer_list anatt_timer; struct work_struct ana_work; + atomic_t nr_active; #endif #ifdef CONFIG_NVME_HOST_AUTH @@ -370,7 +382,7 @@ struct nvme_ctrl { struct nvme_dhchap_key *ctrl_key; u16 transaction; #endif - struct key *tls_key; + key_serial_t tls_pskid; /* Power saving configuration */ u64 ps_max_latency_us; @@ -408,6 +420,7 @@ static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl) enum nvme_iopolicy { NVME_IOPOLICY_NUMA, NVME_IOPOLICY_RR, + NVME_IOPOLICY_QD, }; struct nvme_subsystem { @@ -429,7 +442,7 @@ struct nvme_subsystem { u8 cmic; enum nvme_subsys_type subtype; u16 vendor_id; - u16 awupf; /* 0's based awupf value. */ + u16 awupf; /* 0's based value. 
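The try_module_get()/mod_delayed_work() pairing above pins the module while a head-removal work item is pending; the matching module_put() would live in the delayed work handler, which is outside the lines shown here. A stripped-down sketch of that pattern, with made-up names and a kmalloc()'d object assumed:

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_obj {
	struct delayed_work remove_work;
};

static void demo_remove_work(struct work_struct *work)
{
	struct demo_obj *obj = container_of(to_delayed_work(work),
					    struct demo_obj, remove_work);

	kfree(obj);			/* final teardown of the object */
	module_put(THIS_MODULE);	/* drop the reference taken at queue time */
}

static void demo_init(struct demo_obj *obj)
{
	INIT_DELAYED_WORK(&obj->remove_work, demo_remove_work);
}

static void demo_schedule_removal(struct demo_obj *obj, unsigned int secs)
{
	/* Pin the module so it cannot be unloaded while the work is pending. */
	if (!try_module_get(THIS_MODULE))
		return;
	mod_delayed_work(system_wq, &obj->remove_work, secs * HZ);
}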
*/ struct ida ns_ida; #ifdef CONFIG_NVME_MULTIPATH enum nvme_iopolicy iopolicy; @@ -458,20 +471,20 @@ struct nvme_ns_head { struct srcu_struct srcu; struct nvme_subsystem *subsys; struct nvme_ns_ids ids; + u8 lba_shift; + u16 ms; + u16 pi_size; + u8 pi_type; + u8 guard_type; struct list_head entry; struct kref ref; bool shared; + bool rotational; bool passthru_err_log_enabled; - int instance; struct nvme_effects_log *effects; u64 nuse; unsigned ns_id; - int lba_shift; - u16 ms; - u16 pi_size; - u8 pi_type; - u8 pi_offset; - u8 guard_type; + int instance; #ifdef CONFIG_BLK_DEV_ZONED u64 zsze; #endif @@ -483,13 +496,20 @@ struct nvme_ns_head { struct device cdev_device; struct gendisk *disk; + + u16 nr_plids; + u16 *plids; #ifdef CONFIG_NVME_MULTIPATH struct bio_list requeue_list; spinlock_t requeue_lock; struct work_struct requeue_work; + struct work_struct partition_scan_work; struct mutex lock; unsigned long flags; -#define NVME_NSHEAD_DISK_LIVE 0 + struct delayed_work remove_work; + unsigned int delayed_removal_secs; +#define NVME_NSHEAD_DISK_LIVE 0 +#define NVME_NSHEAD_QUEUE_IF_NO_PATH 1 struct nvme_ns __rcu *current_path[]; #endif }; @@ -502,7 +522,7 @@ static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head) enum nvme_ns_features { NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */ NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */ - NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeores supported */ + NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */ }; struct nvme_ns { @@ -520,10 +540,11 @@ struct nvme_ns { struct nvme_ns_head *head; unsigned long flags; -#define NVME_NS_REMOVING 0 -#define NVME_NS_ANA_PENDING 2 -#define NVME_NS_FORCE_RO 3 -#define NVME_NS_READY 4 +#define NVME_NS_REMOVING 0 +#define NVME_NS_ANA_PENDING 2 +#define NVME_NS_FORCE_RO 3 +#define NVME_NS_READY 4 +#define NVME_NS_SYSFS_ATTR_LINK 5 struct cdev cdev; struct device cdev_device; @@ -537,6 +558,12 @@ static inline bool nvme_ns_has_pi(struct nvme_ns_head *head) return head->pi_type && head->ms == head->pi_size; } +static inline unsigned long nvme_get_virt_boundary(struct nvme_ctrl *ctrl, + bool is_admin) +{ + return NVME_CTRL_PAGE_SIZE - 1; +} + struct nvme_ctrl_ops { const char *name; struct module *module; @@ -551,11 +578,13 @@ struct nvme_ctrl_ops { int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val); void (*free_ctrl)(struct nvme_ctrl *ctrl); void (*submit_async_event)(struct nvme_ctrl *ctrl); + int (*subsystem_reset)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); void (*stop_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); void (*print_device_info)(struct nvme_ctrl *ctrl); bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl); + unsigned long (*get_virt_boundary)(struct nvme_ctrl *ctrl, bool is_admin); }; /* @@ -649,18 +678,9 @@ int nvme_try_sched_reset(struct nvme_ctrl *ctrl); static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl) { - int ret; - - if (!ctrl->subsystem) + if (!ctrl->subsystem || !ctrl->ops->subsystem_reset) return -ENOTTY; - if (!nvme_wait_reset(ctrl)) - return -EBUSY; - - ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65); - if (ret) - return ret; - - return nvme_try_sched_reset(ctrl); + return ctrl->ops->subsystem_reset(ctrl); } /* @@ -689,7 +709,7 @@ static inline u32 nvme_bytes_to_numd(size_t len) static inline bool nvme_is_ana_error(u16 status) { - switch (status & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case 
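The nr_active counter and NVME_MPATH_CNT_ACTIVE request flag introduced above back the new NVME_IOPOLICY_QD ("queue-depth") policy; the real selector lives in multipath.c and also honours ANA state and SRCU protection, which this simplified sketch ignores. The types and helper below are stand-ins, not the driver's:

#include <linux/atomic.h>
#include <linux/limits.h>
#include <linux/list.h>

/* Hypothetical, simplified stand-ins for the driver's path structures. */
struct demo_path {
	struct list_head siblings;
	atomic_t *nr_active;		/* would be &ns->ctrl->nr_active */
};

/* Pick the path whose controller has the fewest requests in flight. */
static struct demo_path *demo_qd_select(struct list_head *paths)
{
	struct demo_path *p, *best = NULL;
	int lowest = INT_MAX;

	list_for_each_entry(p, paths, siblings) {
		int active = atomic_read(p->nr_active);

		if (active < lowest) {
			lowest = active;
			best = p;
		}
	}
	/*
	 * The dispatcher would then atomic_inc() the winner's counter, tag the
	 * request with NVME_MPATH_CNT_ACTIVE, and atomic_dec() on completion.
	 */
	return best;
}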
NVME_SC_ANA_TRANSITION: case NVME_SC_ANA_INACCESSIBLE: case NVME_SC_ANA_PERSISTENT_LOSS: @@ -702,7 +722,7 @@ static inline bool nvme_is_ana_error(u16 status) static inline bool nvme_is_path_error(u16 status) { /* check for a status code type of 'path related status' */ - return (status & 0x700) == 0x300; + return (status & NVME_SCT_MASK) == NVME_SCT_PATH; } /* @@ -792,6 +812,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown); int nvme_enable_ctrl(struct nvme_ctrl *ctrl); int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, const struct nvme_ctrl_ops *ops, unsigned long quirks); +int nvme_add_ctrl(struct nvme_ctrl *ctrl); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl); void nvme_start_ctrl(struct nvme_ctrl *ctrl); void nvme_stop_ctrl(struct nvme_ctrl *ctrl); @@ -877,7 +898,7 @@ enum { NVME_SUBMIT_NOWAIT = (__force nvme_submit_flags_t)(1 << 1), /* Set BLK_MQ_REQ_RESERVED when allocating request */ NVME_SUBMIT_RESERVED = (__force nvme_submit_flags_t)(1 << 2), - /* Retry command when NVME_SC_DNR is not set in the result */ + /* Retry command when NVME_STATUS_DNR is not set in the result */ NVME_SUBMIT_RETRY = (__force nvme_submit_flags_t)(1 << 3), }; @@ -888,10 +909,10 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, int qid, nvme_submit_flags_t flags); int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result); + void *result); int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid, unsigned int dword11, void *buffer, size_t buflen, - u32 *result); + void *result); int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); int nvme_reset_ctrl(struct nvme_ctrl *ctrl); @@ -922,10 +943,11 @@ int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); int nvme_identify_ns(struct nvme_ctrl *ctrl, unsigned nsid, struct nvme_id_ns **id); -int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo); +int nvme_getgeo(struct gendisk *disk, struct hd_geometry *geo); int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags); extern const struct attribute_group *nvme_ns_attr_groups[]; +extern const struct attribute_group nvme_ns_mpath_attr_group; extern const struct pr_ops nvme_pr_ops; extern const struct block_device_operations nvme_ns_head_ops; extern const struct attribute_group nvme_dev_attrs_group; @@ -948,8 +970,10 @@ void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys); void nvme_failover_req(struct request *req); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); +void nvme_mpath_add_sysfs_link(struct nvme_ns_head *ns); +void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns); void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid); -void nvme_mpath_remove_disk(struct nvme_ns_head *head); +void nvme_mpath_put_disk(struct nvme_ns_head *head); int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl); void nvme_mpath_update(struct nvme_ctrl *ctrl); @@ -958,7 +982,7 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl); bool nvme_mpath_clear_current_path(struct nvme_ns *ns); void nvme_mpath_revalidate_paths(struct nvme_ns *ns); void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl); -void nvme_mpath_shutdown_disk(struct nvme_ns_head *head); +void nvme_mpath_remove_disk(struct nvme_ns_head *head); void 
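Replacing the open-coded 0x7ff and 0x700 with NVME_SCT_SC_MASK and NVME_SCT_MASK spells out how the completion status splits into a status code type (SCT) and status code (SC). A small, self-contained illustration, assuming the named masks equal the literals they replace (which the diff implies):

#include <stdint.h>
#include <stdio.h>

/* Values mirrored from the literals the patch replaces. */
#define NVME_SCT_MASK		0x700	/* status code type field */
#define NVME_SCT_SC_MASK	0x7ff	/* status code type + status code */
#define NVME_SCT_PATH		0x300	/* 'path related status' type */

static int is_path_error(uint16_t status)
{
	return (status & NVME_SCT_MASK) == NVME_SCT_PATH;
}

int main(void)
{
	uint16_t status = 0x4303;	/* hypothetical status with a path-type SCT */

	printf("sct+sc=%#x path_error=%d\n", status & NVME_SCT_SC_MASK,
	       is_path_error(status));
	return 0;
}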
nvme_mpath_start_request(struct request *rq); void nvme_mpath_end_request(struct request *rq); @@ -973,12 +997,21 @@ static inline void nvme_trace_bio_complete(struct request *req) extern bool multipath; extern struct device_attribute dev_attr_ana_grpid; extern struct device_attribute dev_attr_ana_state; +extern struct device_attribute dev_attr_queue_depth; +extern struct device_attribute dev_attr_numa_nodes; +extern struct device_attribute dev_attr_delayed_removal_secs; extern struct device_attribute subsys_attr_iopolicy; static inline bool nvme_disk_is_ns_head(struct gendisk *disk) { return disk->fops == &nvme_ns_head_ops; } +static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head) +{ + if (test_bit(NVME_NSHEAD_QUEUE_IF_NO_PATH, &head->flags)) + return true; + return false; +} #else #define multipath false static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) @@ -999,7 +1032,13 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) { } -static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) +static inline void nvme_mpath_put_disk(struct nvme_ns_head *head) +{ +} +static inline void nvme_mpath_add_sysfs_link(struct nvme_ns *ns) +{ +} +static inline void nvme_mpath_remove_sysfs_link(struct nvme_ns *ns) { } static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns) @@ -1012,7 +1051,7 @@ static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns) static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl) { } -static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) +static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) { } static inline void nvme_trace_bio_complete(struct request *req) @@ -1060,8 +1099,15 @@ static inline bool nvme_disk_is_ns_head(struct gendisk *disk) { return false; } +static inline bool nvme_mpath_queue_if_no_path(struct nvme_ns_head *head) +{ + return false; +} #endif /* CONFIG_NVME_MULTIPATH */ +int nvme_ns_get_unique_id(struct nvme_ns *ns, u8 id[16], + enum blk_unique_id type); + struct nvme_zone_info { u64 zone_size; unsigned int max_open_zones; @@ -1069,7 +1115,7 @@ struct nvme_zone_info { }; int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data); + unsigned int nr_zones, struct blk_report_zones_args *args); int nvme_query_zone_info(struct nvme_ns *ns, unsigned lbaf, struct nvme_zone_info *zi); void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, @@ -1118,7 +1164,15 @@ static inline void nvme_start_request(struct request *rq) static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl) { - return ctrl->sgls & ((1 << 0) | (1 << 1)); + return ctrl->sgls & (NVME_CTRL_SGLS_BYTE_ALIGNED | + NVME_CTRL_SGLS_DWORD_ALIGNED); +} + +static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl) +{ + if (ctrl->ops->flags & NVME_F_FABRICS) + return true; + return ctrl->sgls & NVME_CTRL_SGLS_MSDS; } #ifdef CONFIG_NVME_HOST_AUTH @@ -1129,6 +1183,7 @@ void nvme_auth_stop(struct nvme_ctrl *ctrl); int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid); int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid); void nvme_auth_free(struct nvme_ctrl *ctrl); +void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl); #else static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) { @@ -1151,6 +1206,7 @@ static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) return -EPROTONOSUPPORT; } static inline void 
nvme_auth_free(struct nvme_ctrl *ctrl) {}; +static inline void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl) {}; #endif u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, @@ -1169,43 +1225,4 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl) return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; } -#ifdef CONFIG_NVME_VERBOSE_ERRORS -const char *nvme_get_error_status_str(u16 status); -const char *nvme_get_opcode_str(u8 opcode); -const char *nvme_get_admin_opcode_str(u8 opcode); -const char *nvme_get_fabrics_opcode_str(u8 opcode); -#else /* CONFIG_NVME_VERBOSE_ERRORS */ -static inline const char *nvme_get_error_status_str(u16 status) -{ - return "I/O Error"; -} -static inline const char *nvme_get_opcode_str(u8 opcode) -{ - return "I/O Cmd"; -} -static inline const char *nvme_get_admin_opcode_str(u8 opcode) -{ - return "Admin Cmd"; -} - -static inline const char *nvme_get_fabrics_opcode_str(u8 opcode) -{ - return "Fabrics Cmd"; -} -#endif /* CONFIG_NVME_VERBOSE_ERRORS */ - -static inline const char *nvme_opcode_str(int qid, u8 opcode) -{ - return qid ? nvme_get_opcode_str(opcode) : - nvme_get_admin_opcode_str(opcode); -} - -static inline const char *nvme_fabrics_opcode_str( - int qid, const struct nvme_command *cmd) -{ - if (nvme_is_fabrics(cmd)) - return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype); - - return nvme_opcode_str(qid, cmd->common.opcode); -} #endif /* _NVME_H */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 102a9fb0c65f..0e4caeab739c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -7,8 +7,7 @@ #include <linux/acpi.h> #include <linux/async.h> #include <linux/blkdev.h> -#include <linux/blk-mq.h> -#include <linux/blk-mq-pci.h> +#include <linux/blk-mq-dma.h> #include <linux/blk-integrity.h> #include <linux/dmi.h> #include <linux/init.h> @@ -19,6 +18,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/mutex.h> +#include <linux/nodemask.h> #include <linux/once.h> #include <linux/pci.h> #include <linux/suspend.h> @@ -27,7 +27,6 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/sed-opal.h> -#include <linux/pci-p2pdma.h> #include "trace.h" #include "nvme.h" @@ -35,15 +34,43 @@ #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) -#define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) +/* Optimisation for I/Os between 4k and 128k */ +#define NVME_SMALL_POOL_SIZE 256 /* - * These can be higher, but we need to ensure that any command doesn't - * require an sg allocation that needs more than a page of data. + * Arbitrary upper bound. */ -#define NVME_MAX_KB_SZ 8192 -#define NVME_MAX_SEGS 128 -#define NVME_MAX_NR_ALLOCATIONS 5 +#define NVME_MAX_BYTES SZ_8M +#define NVME_MAX_NR_DESCRIPTORS 5 + +/* + * For data SGLs we support a single descriptors worth of SGL entries. + * For PRPs, segments don't matter at all. + */ +#define NVME_MAX_SEGS \ + (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) + +/* + * For metadata SGLs, only the small descriptor is supported, and the first + * entry is the segment descriptor, which for the data pointer sits in the SQE. + */ +#define NVME_MAX_META_SEGS \ + ((NVME_SMALL_POOL_SIZE / sizeof(struct nvme_sgl_desc)) - 1) + +/* + * The last entry is used to link to the next descriptor. + */ +#define PRPS_PER_PAGE \ + (((NVME_CTRL_PAGE_SIZE / sizeof(__le64))) - 1) + +/* + * I/O could be non-aligned both at the beginning and end. 
+ */ +#define MAX_PRP_RANGE \ + (NVME_MAX_BYTES + 2 * (NVME_CTRL_PAGE_SIZE - 1)) + +static_assert(MAX_PRP_RANGE / NVME_CTRL_PAGE_SIZE <= + (1 /* prp1 */ + NVME_MAX_NR_DESCRIPTORS * PRPS_PER_PAGE)); static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0444); @@ -81,7 +108,7 @@ static int io_queue_count_set(const char *val, const struct kernel_param *kp) int ret; ret = kstrtouint(val, 10, &n); - if (ret != 0 || n > num_possible_cpus()) + if (ret != 0 || n > blk_mq_num_possible_queues(0)) return -EINVAL; return param_set_uint(val, kp); } @@ -112,6 +139,11 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); static void nvme_delete_io_queues(struct nvme_dev *dev); static void nvme_update_attrs(struct nvme_dev *dev); +struct nvme_descriptor_pools { + struct dma_pool *large; + struct dma_pool *small; +}; + /* * Represents an NVM Express device. Each nvme_dev is a PCI function. */ @@ -121,8 +153,6 @@ struct nvme_dev { struct blk_mq_tag_set admin_tagset; u32 __iomem *dbs; struct device *dev; - struct dma_pool *prp_page_pool; - struct dma_pool *prp_small_pool; unsigned online_queues; unsigned max_qid; unsigned io_queues[HCTX_MAX_TYPES]; @@ -141,8 +171,8 @@ struct nvme_dev { struct nvme_ctrl ctrl; u32 last_ps; bool hmb; - - mempool_t *iod_mempool; + struct sg_table *hmb_sgt; + mempool_t *dmavec_mempool; /* shadow doorbell buffer support: */ __le32 *dbbuf_dbs; @@ -153,12 +183,14 @@ struct nvme_dev { /* host memory buffer support: */ u64 host_mem_size; u32 nr_host_mem_descs; + u32 host_mem_descs_size; dma_addr_t host_mem_descs_dma; struct nvme_host_mem_buf_desc *host_mem_descs; void **host_mem_desc_bufs; unsigned int nr_allocated_queues; unsigned int nr_write_queues; unsigned int nr_poll_queues; + struct nvme_descriptor_pools descriptor_pools[]; }; static int io_queue_depth_set(const char *val, const struct kernel_param *kp) @@ -188,6 +220,7 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) */ struct nvme_queue { struct nvme_dev *dev; + struct nvme_descriptor_pools descriptor_pools; spinlock_t sq_lock; void *sq_cmds; /* only used for poll queues: */ @@ -216,28 +249,57 @@ struct nvme_queue { struct completion delete_done; }; -union nvme_descriptor { - struct nvme_sgl_desc *sg_list; - __le64 *prp_list; +/* bits for iod->flags */ +enum nvme_iod_flags { + /* this command has been aborted by the timeout handler */ + IOD_ABORTED = 1U << 0, + + /* uses the small descriptor pool */ + IOD_SMALL_DESCRIPTOR = 1U << 1, + + /* single segment dma mapping */ + IOD_SINGLE_SEGMENT = 1U << 2, + + /* Data payload contains p2p memory */ + IOD_DATA_P2P = 1U << 3, + + /* Metadata contains p2p memory */ + IOD_META_P2P = 1U << 4, + + /* Data payload contains MMIO memory */ + IOD_DATA_MMIO = 1U << 5, + + /* Metadata contains MMIO memory */ + IOD_META_MMIO = 1U << 6, + + /* Metadata using non-coalesced MPTR */ + IOD_SINGLE_META_SEGMENT = 1U << 7, +}; + +struct nvme_dma_vec { + dma_addr_t addr; + unsigned int len; }; /* * The nvme_iod describes the data in an I/O. - * - * The sg pointer contains the list of PRP/SGL chunk allocations in addition - * to the actual struct scatterlist. */ struct nvme_iod { struct nvme_request req; struct nvme_command cmd; - bool aborted; - s8 nr_allocations; /* PRP list pool allocations. 
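A worked check of the static_assert above, assuming the usual NVME_CTRL_PAGE_SIZE of 4096 bytes (the real constant is defined elsewhere in the driver):

#include <assert.h>
#include <stdint.h>

/* Assumed/mirrored values for illustration only. */
#define CTRL_PAGE_SIZE	4096ULL			/* NVME_CTRL_PAGE_SIZE (assumed) */
#define MAX_BYTES	(8ULL << 20)		/* NVME_MAX_BYTES = SZ_8M */
#define NR_DESCRIPTORS	5ULL			/* NVME_MAX_NR_DESCRIPTORS */
#define PRPS_PER_DESC	(CTRL_PAGE_SIZE / sizeof(uint64_t) - 1)	/* 511 */
#define MAX_PRP_RANGE	(MAX_BYTES + 2 * (CTRL_PAGE_SIZE - 1))

/* Worst case: 8396798 / 4096 = 2049 pages vs. 1 + 5 * 511 = 2556 PRP slots. */
static_assert(MAX_PRP_RANGE / CTRL_PAGE_SIZE <=
	      1 + NR_DESCRIPTORS * PRPS_PER_DESC,
	      "worst-case unaligned 8 MiB I/O must fit the preallocated PRP lists");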
0 means small - pool in use */ - unsigned int dma_len; /* length of single DMA segment mapping */ - dma_addr_t first_dma; + u8 flags; + u8 nr_descriptors; + + unsigned int total_len; + struct dma_iova_state dma_state; + void *descriptors[NVME_MAX_NR_DESCRIPTORS]; + struct nvme_dma_vec *dma_vecs; + unsigned int nr_dma_vecs; + dma_addr_t meta_dma; - struct sg_table sgt; - union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; + unsigned int meta_total_len; + struct dma_iova_state meta_dma_state; + struct nvme_sgl_desc *meta_descriptor; }; static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) @@ -367,7 +429,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, /* * Ensure that the doorbell is updated before reading the event * index from memory. The controller needs to provide similar - * ordering to ensure the envent index is updated before reading + * ordering to ensure the event index is updated before reading * the doorbell. */ mb(); @@ -380,42 +442,78 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, return true; } -/* - * Will slightly overestimate the number of pages needed. This is OK - * as it only leads to a small amount of wasted memory for the lifetime of - * the I/O. - */ -static int nvme_pci_npages_prp(void) +static struct nvme_descriptor_pools * +nvme_setup_descriptor_pools(struct nvme_dev *dev, unsigned numa_node) { - unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; - unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); - return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); + struct nvme_descriptor_pools *pools = &dev->descriptor_pools[numa_node]; + size_t small_align = NVME_SMALL_POOL_SIZE; + + if (pools->small) + return pools; /* already initialized */ + + pools->large = dma_pool_create_node("nvme descriptor page", dev->dev, + NVME_CTRL_PAGE_SIZE, NVME_CTRL_PAGE_SIZE, 0, numa_node); + if (!pools->large) + return ERR_PTR(-ENOMEM); + + if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512) + small_align = 512; + + pools->small = dma_pool_create_node("nvme descriptor small", dev->dev, + NVME_SMALL_POOL_SIZE, small_align, 0, numa_node); + if (!pools->small) { + dma_pool_destroy(pools->large); + pools->large = NULL; + return ERR_PTR(-ENOMEM); + } + + return pools; } -static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) +static void nvme_release_descriptor_pools(struct nvme_dev *dev) { - struct nvme_dev *dev = to_nvme_dev(data); - struct nvme_queue *nvmeq = &dev->queues[0]; + unsigned i; - WARN_ON(hctx_idx != 0); - WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); + for (i = 0; i < nr_node_ids; i++) { + struct nvme_descriptor_pools *pools = &dev->descriptor_pools[i]; - hctx->driver_data = nvmeq; - return 0; + dma_pool_destroy(pools->large); + dma_pool_destroy(pools->small); + } } -static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, - unsigned int hctx_idx) +static int nvme_init_hctx_common(struct blk_mq_hw_ctx *hctx, void *data, + unsigned qid) { struct nvme_dev *dev = to_nvme_dev(data); - struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; + struct nvme_queue *nvmeq = &dev->queues[qid]; + struct nvme_descriptor_pools *pools; + struct blk_mq_tags *tags; - WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); + tags = qid ? 
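The large/small pools created above follow the standard dma_pool lifecycle, just allocated NUMA-locally via dma_pool_create_node(); a compact, self-contained sketch of that lifecycle for a hypothetical 256-byte descriptor pool (error handling trimmed):

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/gfp.h>

static void descriptor_pool_demo(struct device *dev, int node)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	/* 256-byte blocks, 256-byte aligned, allocated close to @node */
	pool = dma_pool_create_node("demo descriptors", dev, 256, 256, 0, node);
	if (!pool)
		return;

	desc = dma_pool_alloc(pool, GFP_ATOMIC, &dma);	/* per-command descriptor */
	if (desc)
		dma_pool_free(pool, desc, dma);

	dma_pool_destroy(pool);
}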
dev->tagset.tags[qid - 1] : dev->admin_tagset.tags[0]; + WARN_ON(tags != hctx->tags); + pools = nvme_setup_descriptor_pools(dev, hctx->numa_node); + if (IS_ERR(pools)) + return PTR_ERR(pools); + + nvmeq->descriptor_pools = *pools; hctx->driver_data = nvmeq; return 0; } +static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + WARN_ON(hctx_idx != 0); + return nvme_init_hctx_common(hctx, data, 0); +} + +static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, + unsigned int hctx_idx) +{ + return nvme_init_hctx_common(hctx, data, hctx_idx + 1); +} + static int nvme_pci_init_request(struct blk_mq_tag_set *set, struct request *req, unsigned int hctx_idx, unsigned int numa_node) @@ -457,7 +555,7 @@ static void nvme_pci_map_queues(struct blk_mq_tag_set *set) */ map->queue_offset = qoff; if (i != HCTX_TYPE_POLL && offset) - blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); + blk_mq_map_hw_queues(map, dev->dev, offset); else blk_mq_map_queues(map); qoff += map->nr_queues; @@ -504,173 +602,368 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) spin_unlock(&nvmeq->sq_lock); } -static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, - int nseg) +enum nvme_use_sgl { + SGL_UNSUPPORTED, + SGL_SUPPORTED, + SGL_FORCED, +}; + +static inline bool nvme_pci_metadata_use_sgls(struct request *req) { struct nvme_queue *nvmeq = req->mq_hctx->driver_data; - unsigned int avg_seg_size; - - avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); + struct nvme_dev *dev = nvmeq->dev; - if (!nvme_ctrl_sgl_supported(&dev->ctrl)) + if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl)) return false; - if (!nvmeq->qid) - return false; - if (!sgl_threshold || avg_seg_size < sgl_threshold) - return false; - return true; + return req->nr_integrity_segments > 1 || + nvme_req(req)->flags & NVME_REQ_USERCMD; +} + +static inline enum nvme_use_sgl nvme_pci_use_sgls(struct nvme_dev *dev, + struct request *req) +{ + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + + if (nvmeq->qid && nvme_ctrl_sgl_supported(&dev->ctrl)) { + /* + * When the controller is capable of using SGL, there are + * several conditions that we force to use it: + * + * 1. A request containing page gaps within the controller's + * mask can not use the PRP format. + * + * 2. User commands use SGL because that lets the device + * validate the requested transfer lengths. + * + * 3. Multiple integrity segments must use SGL as that's the + * only way to describe such a command in NVMe. 
+ */ + if (req_phys_gap_mask(req) & (NVME_CTRL_PAGE_SIZE - 1) || + nvme_req(req)->flags & NVME_REQ_USERCMD || + req->nr_integrity_segments > 1) + return SGL_FORCED; + return SGL_SUPPORTED; + } + + return SGL_UNSUPPORTED; } -static void nvme_free_prps(struct nvme_dev *dev, struct request *req) +static unsigned int nvme_pci_avg_seg_size(struct request *req) { + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + unsigned int nseg; + + if (blk_rq_dma_map_coalesce(&iod->dma_state)) + nseg = 1; + else + nseg = blk_rq_nr_phys_segments(req); + return DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); +} + +static inline struct dma_pool *nvme_dma_pool(struct nvme_queue *nvmeq, + struct nvme_iod *iod) +{ + if (iod->flags & IOD_SMALL_DESCRIPTOR) + return nvmeq->descriptor_pools.small; + return nvmeq->descriptor_pools.large; +} + +static inline bool nvme_pci_cmd_use_meta_sgl(struct nvme_command *cmd) +{ + return (cmd->common.flags & NVME_CMD_SGL_ALL) == NVME_CMD_SGL_METASEG; +} + +static inline bool nvme_pci_cmd_use_sgl(struct nvme_command *cmd) +{ + return cmd->common.flags & + (NVME_CMD_SGL_METABUF | NVME_CMD_SGL_METASEG); +} + +static inline dma_addr_t nvme_pci_first_desc_dma_addr(struct nvme_command *cmd) +{ + if (nvme_pci_cmd_use_sgl(cmd)) + return le64_to_cpu(cmd->common.dptr.sgl.addr); + return le64_to_cpu(cmd->common.dptr.prp2); +} + +static void nvme_free_descriptors(struct request *req) +{ + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - dma_addr_t dma_addr = iod->first_dma; + dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd); int i; - for (i = 0; i < iod->nr_allocations; i++) { - __le64 *prp_list = iod->list[i].prp_list; + if (iod->nr_descriptors == 1) { + dma_pool_free(nvme_dma_pool(nvmeq, iod), iod->descriptors[0], + dma_addr); + return; + } + + for (i = 0; i < iod->nr_descriptors; i++) { + __le64 *prp_list = iod->descriptors[i]; dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); - dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); + dma_pool_free(nvmeq->descriptor_pools.large, prp_list, + dma_addr); dma_addr = next_dma_addr; } } -static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +static void nvme_free_prps(struct request *req, unsigned int attrs) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int i; + + for (i = 0; i < iod->nr_dma_vecs; i++) + dma_unmap_phys(nvmeq->dev->dev, iod->dma_vecs[i].addr, + iod->dma_vecs[i].len, rq_dma_dir(req), attrs); + mempool_free(iod->dma_vecs, nvmeq->dev->dmavec_mempool); +} + +static void nvme_free_sgls(struct request *req, struct nvme_sgl_desc *sge, + struct nvme_sgl_desc *sg_list, unsigned int attrs) +{ + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + enum dma_data_direction dir = rq_dma_dir(req); + unsigned int len = le32_to_cpu(sge->length); + struct device *dma_dev = nvmeq->dev->dev; + unsigned int i; + + if (sge->type == (NVME_SGL_FMT_DATA_DESC << 4)) { + dma_unmap_phys(dma_dev, le64_to_cpu(sge->addr), len, dir, + attrs); + return; + } + + for (i = 0; i < len / sizeof(*sg_list); i++) + dma_unmap_phys(dma_dev, le64_to_cpu(sg_list[i].addr), + le32_to_cpu(sg_list[i].length), dir, attrs); +} + +static void nvme_unmap_metadata(struct request *req) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE; + enum dma_data_direction dir = rq_dma_dir(req); struct nvme_iod 
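Tying the pieces together: nvme_pci_use_sgls() above classifies a request as SGL_UNSUPPORTED, SGL_SUPPORTED or SGL_FORCED, and nvme_map_data() later in this patch only takes the SGL path for SGL_SUPPORTED when the sgl_threshold module parameter is non-zero and the average segment size reaches it. A condensed restatement, not the driver code:

#include <linux/types.h>

/* Mirrors enum nvme_use_sgl from the patch. */
enum use_sgl_hint { USE_SGL_UNSUPPORTED, USE_SGL_SUPPORTED, USE_SGL_FORCED };

static bool should_use_sgl(enum use_sgl_hint hint, unsigned int avg_seg_size,
			   unsigned int sgl_threshold)
{
	if (hint == USE_SGL_FORCED)
		return true;	/* page gaps, user commands, >1 integrity segment */
	if (hint == USE_SGL_UNSUPPORTED)
		return false;	/* admin queue or controller without SGL support */
	return sgl_threshold && avg_seg_size >= sgl_threshold;
}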
*iod = blk_mq_rq_to_pdu(req); + struct device *dma_dev = nvmeq->dev->dev; + struct nvme_sgl_desc *sge = iod->meta_descriptor; + unsigned int attrs = 0; - if (iod->dma_len) { - dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, + if (iod->flags & IOD_SINGLE_META_SEGMENT) { + dma_unmap_page(dma_dev, iod->meta_dma, + rq_integrity_vec(req).bv_len, rq_dma_dir(req)); return; } - WARN_ON_ONCE(!iod->sgt.nents); + if (iod->flags & IOD_META_P2P) + map = PCI_P2PDMA_MAP_BUS_ADDR; + else if (iod->flags & IOD_META_MMIO) { + map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; + attrs |= DMA_ATTR_MMIO; + } - dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); + if (!blk_rq_dma_unmap(req, dma_dev, &iod->meta_dma_state, + iod->meta_total_len, map)) { + if (nvme_pci_cmd_use_meta_sgl(&iod->cmd)) + nvme_free_sgls(req, sge, &sge[1], attrs); + else + dma_unmap_phys(dma_dev, iod->meta_dma, + iod->meta_total_len, dir, attrs); + } - if (iod->nr_allocations == 0) - dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, - iod->first_dma); - else if (iod->nr_allocations == 1) - dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, - iod->first_dma); - else - nvme_free_prps(dev, req); - mempool_free(iod->sgt.sgl, dev->iod_mempool); + if (iod->meta_descriptor) + dma_pool_free(nvmeq->descriptor_pools.small, + iod->meta_descriptor, iod->meta_dma); } -static void nvme_print_sgl(struct scatterlist *sgl, int nents) +static void nvme_unmap_data(struct request *req) { - int i; - struct scatterlist *sg; + enum pci_p2pdma_map_type map = PCI_P2PDMA_MAP_NONE; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct device *dma_dev = nvmeq->dev->dev; + unsigned int attrs = 0; + + if (iod->flags & IOD_SINGLE_SEGMENT) { + static_assert(offsetof(union nvme_data_ptr, prp1) == + offsetof(union nvme_data_ptr, sgl.addr)); + dma_unmap_page(dma_dev, le64_to_cpu(iod->cmd.common.dptr.prp1), + iod->total_len, rq_dma_dir(req)); + return; + } + + if (iod->flags & IOD_DATA_P2P) + map = PCI_P2PDMA_MAP_BUS_ADDR; + else if (iod->flags & IOD_DATA_MMIO) { + map = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; + attrs |= DMA_ATTR_MMIO; + } - for_each_sg(sgl, sg, nents, i) { - dma_addr_t phys = sg_phys(sg); - pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " - "dma_address:%pad dma_length:%d\n", - i, &phys, sg->offset, sg->length, &sg_dma_address(sg), - sg_dma_len(sg)); + if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len, + map)) { + if (nvme_pci_cmd_use_sgl(&iod->cmd)) + nvme_free_sgls(req, iod->descriptors[0], + &iod->cmd.common.dptr.sgl, attrs); + else + nvme_free_prps(req, attrs); } + + if (iod->nr_descriptors) + nvme_free_descriptors(req); } -static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, - struct request *req, struct nvme_rw_command *cmnd) +static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev, + struct blk_dma_iter *iter) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct dma_pool *pool; - int length = blk_rq_payload_bytes(req); - struct scatterlist *sg = iod->sgt.sgl; - int dma_len = sg_dma_len(sg); - u64 dma_addr = sg_dma_address(sg); - int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); + + if (iter->len) + return true; + if (!blk_rq_dma_map_iter_next(req, dma_dev, &iod->dma_state, iter)) + return false; + if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) { + iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr; + iod->dma_vecs[iod->nr_dma_vecs].len = iter->len; + iod->nr_dma_vecs++; + } + return true; +} + +static 
blk_status_t nvme_pci_setup_data_prp(struct request *req, + struct blk_dma_iter *iter) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int length = blk_rq_payload_bytes(req); + dma_addr_t prp1_dma, prp2_dma = 0; + unsigned int prp_len, i; __le64 *prp_list; - dma_addr_t prp_dma; - int nprps, i; - length -= (NVME_CTRL_PAGE_SIZE - offset); - if (length <= 0) { - iod->first_dma = 0; - goto done; + if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(nvmeq->dev->dev)) { + iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool, + GFP_ATOMIC); + if (!iod->dma_vecs) + return BLK_STS_RESOURCE; + iod->dma_vecs[0].addr = iter->addr; + iod->dma_vecs[0].len = iter->len; + iod->nr_dma_vecs = 1; } - dma_len -= (NVME_CTRL_PAGE_SIZE - offset); - if (dma_len) { - dma_addr += (NVME_CTRL_PAGE_SIZE - offset); - } else { - sg = sg_next(sg); - dma_addr = sg_dma_address(sg); - dma_len = sg_dma_len(sg); + /* + * PRP1 always points to the start of the DMA transfers. + * + * This is the only PRP (except for the list entries) that could be + * non-aligned. + */ + prp1_dma = iter->addr; + prp_len = min(length, NVME_CTRL_PAGE_SIZE - + (iter->addr & (NVME_CTRL_PAGE_SIZE - 1))); + iod->total_len += prp_len; + iter->addr += prp_len; + iter->len -= prp_len; + length -= prp_len; + if (!length) + goto done; + + if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { + if (WARN_ON_ONCE(!iter->status)) + goto bad_sgl; + goto done; } + /* + * PRP2 is usually a list, but can point to data if all data to be + * transferred fits into PRP1 + PRP2: + */ if (length <= NVME_CTRL_PAGE_SIZE) { - iod->first_dma = dma_addr; + prp2_dma = iter->addr; + iod->total_len += length; goto done; } - nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); - if (nprps <= (256 / 8)) { - pool = dev->prp_small_pool; - iod->nr_allocations = 0; - } else { - pool = dev->prp_page_pool; - iod->nr_allocations = 1; - } + if (DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE) <= + NVME_SMALL_POOL_SIZE / sizeof(__le64)) + iod->flags |= IOD_SMALL_DESCRIPTOR; - prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); + prp_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, + &prp2_dma); if (!prp_list) { - iod->nr_allocations = -1; - return BLK_STS_RESOURCE; + iter->status = BLK_STS_RESOURCE; + goto done; } - iod->list[0].prp_list = prp_list; - iod->first_dma = prp_dma; + iod->descriptors[iod->nr_descriptors++] = prp_list; + i = 0; for (;;) { + prp_list[i++] = cpu_to_le64(iter->addr); + prp_len = min(length, NVME_CTRL_PAGE_SIZE); + if (WARN_ON_ONCE(iter->len < prp_len)) + goto bad_sgl; + + iod->total_len += prp_len; + iter->addr += prp_len; + iter->len -= prp_len; + length -= prp_len; + if (!length) + break; + + if (!nvme_pci_prp_iter_next(req, nvmeq->dev->dev, iter)) { + if (WARN_ON_ONCE(!iter->status)) + goto bad_sgl; + goto done; + } + + /* + * If we've filled the entire descriptor, allocate a new that is + * pointed to be the last entry in the previous PRP list. To + * accommodate for that move the last actual entry to the new + * descriptor. 
+ */ if (i == NVME_CTRL_PAGE_SIZE >> 3) { __le64 *old_prp_list = prp_list; - prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); - if (!prp_list) - goto free_prps; - iod->list[iod->nr_allocations++].prp_list = prp_list; + dma_addr_t prp_list_dma; + + prp_list = dma_pool_alloc(nvmeq->descriptor_pools.large, + GFP_ATOMIC, &prp_list_dma); + if (!prp_list) { + iter->status = BLK_STS_RESOURCE; + goto done; + } + iod->descriptors[iod->nr_descriptors++] = prp_list; + prp_list[0] = old_prp_list[i - 1]; - old_prp_list[i - 1] = cpu_to_le64(prp_dma); + old_prp_list[i - 1] = cpu_to_le64(prp_list_dma); i = 1; } - prp_list[i++] = cpu_to_le64(dma_addr); - dma_len -= NVME_CTRL_PAGE_SIZE; - dma_addr += NVME_CTRL_PAGE_SIZE; - length -= NVME_CTRL_PAGE_SIZE; - if (length <= 0) - break; - if (dma_len > 0) - continue; - if (unlikely(dma_len < 0)) - goto bad_sgl; - sg = sg_next(sg); - dma_addr = sg_dma_address(sg); - dma_len = sg_dma_len(sg); } + done: - cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); - cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); - return BLK_STS_OK; -free_prps: - nvme_free_prps(dev, req); - return BLK_STS_RESOURCE; + /* + * nvme_unmap_data uses the DPT field in the SQE to tear down the + * mapping, so initialize it even for failures. + */ + iod->cmd.common.dptr.prp1 = cpu_to_le64(prp1_dma); + iod->cmd.common.dptr.prp2 = cpu_to_le64(prp2_dma); + if (unlikely(iter->status)) + nvme_unmap_data(req); + return iter->status; + bad_sgl: - WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), - "Invalid SGL for payload:%d nents:%d\n", - blk_rq_payload_bytes(req), iod->sgt.nents); + dev_err_once(nvmeq->dev->dev, + "Incorrectly formed request for payload:%d nents:%d\n", + blk_rq_payload_bytes(req), blk_rq_nr_phys_segments(req)); return BLK_STS_IOERR; } static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, - struct scatterlist *sg) + struct blk_dma_iter *iter) { - sge->addr = cpu_to_le64(sg_dma_address(sg)); - sge->length = cpu_to_le32(sg_dma_len(sg)); + sge->addr = cpu_to_le64(iter->addr); + sge->length = cpu_to_le32(iter->len); sge->type = NVME_SGL_FMT_DATA_DESC << 4; } @@ -682,180 +975,263 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; } -static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, - struct request *req, struct nvme_rw_command *cmd) +static blk_status_t nvme_pci_setup_data_sgl(struct request *req, + struct blk_dma_iter *iter) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - struct dma_pool *pool; + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int entries = blk_rq_nr_phys_segments(req); struct nvme_sgl_desc *sg_list; - struct scatterlist *sg = iod->sgt.sgl; - unsigned int entries = iod->sgt.nents; dma_addr_t sgl_dma; - int i = 0; + unsigned int mapped = 0; - /* setting the transfer type as SGL */ - cmd->flags = NVME_CMD_SGL_METABUF; + /* set the transfer type as SGL */ + iod->cmd.common.flags = NVME_CMD_SGL_METABUF; - if (entries == 1) { - nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); + if (entries == 1 || blk_rq_dma_map_coalesce(&iod->dma_state)) { + nvme_pci_sgl_set_data(&iod->cmd.common.dptr.sgl, iter); + iod->total_len += iter->len; return BLK_STS_OK; } - if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { - pool = dev->prp_small_pool; - iod->nr_allocations = 0; - } else { - pool = dev->prp_page_pool; - iod->nr_allocations = 1; - } + if (entries <= NVME_SMALL_POOL_SIZE / sizeof(*sg_list)) + iod->flags |= IOD_SMALL_DESCRIPTOR; - sg_list = dma_pool_alloc(pool, GFP_ATOMIC, 
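A short worked example of the PRP rules implemented above, assuming NVME_CTRL_PAGE_SIZE is 4096: a 16 KiB transfer whose buffer starts 512 bytes into a page.

/*
 * PRP1 points at the buffer and covers 4096 - 512 = 3584 bytes up to the
 * first page boundary. 16384 - 3584 = 12800 bytes remain, which is more
 * than one controller page, so PRP2 must point to a PRP list rather than
 * data. The list needs DIV_ROUND_UP(12800, 4096) = 4 entries; 4 <= 256 / 8,
 * so it fits a small-pool descriptor and IOD_SMALL_DESCRIPTOR is set.
 */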
&sgl_dma); - if (!sg_list) { - iod->nr_allocations = -1; + sg_list = dma_pool_alloc(nvme_dma_pool(nvmeq, iod), GFP_ATOMIC, + &sgl_dma); + if (!sg_list) return BLK_STS_RESOURCE; - } - - iod->list[0].sg_list = sg_list; - iod->first_dma = sgl_dma; + iod->descriptors[iod->nr_descriptors++] = sg_list; - nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); do { - nvme_pci_sgl_set_data(&sg_list[i++], sg); - sg = sg_next(sg); - } while (--entries > 0); + if (WARN_ON_ONCE(mapped == entries)) { + iter->status = BLK_STS_IOERR; + break; + } + nvme_pci_sgl_set_data(&sg_list[mapped++], iter); + iod->total_len += iter->len; + } while (blk_rq_dma_map_iter_next(req, nvmeq->dev->dev, &iod->dma_state, + iter)); - return BLK_STS_OK; + nvme_pci_sgl_set_seg(&iod->cmd.common.dptr.sgl, sgl_dma, mapped); + if (unlikely(iter->status)) + nvme_unmap_data(req); + return iter->status; } -static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, - struct request *req, struct nvme_rw_command *cmnd, - struct bio_vec *bv) +static blk_status_t nvme_pci_setup_data_simple(struct request *req, + enum nvme_use_sgl use_sgl) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); - unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; - - iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); - if (dma_mapping_error(dev->dev, iod->first_dma)) + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct bio_vec bv = req_bvec(req); + unsigned int prp1_offset = bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1); + bool prp_possible = prp1_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2; + dma_addr_t dma_addr; + + if (!use_sgl && !prp_possible) + return BLK_STS_AGAIN; + if (is_pci_p2pdma_page(bv.bv_page)) + return BLK_STS_AGAIN; + + dma_addr = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); + if (dma_mapping_error(nvmeq->dev->dev, dma_addr)) return BLK_STS_RESOURCE; - iod->dma_len = bv->bv_len; + iod->total_len = bv.bv_len; + iod->flags |= IOD_SINGLE_SEGMENT; + + if (use_sgl == SGL_FORCED || !prp_possible) { + iod->cmd.common.flags = NVME_CMD_SGL_METABUF; + iod->cmd.common.dptr.sgl.addr = cpu_to_le64(dma_addr); + iod->cmd.common.dptr.sgl.length = cpu_to_le32(bv.bv_len); + iod->cmd.common.dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; + } else { + unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - prp1_offset; + + iod->cmd.common.dptr.prp1 = cpu_to_le64(dma_addr); + iod->cmd.common.dptr.prp2 = 0; + if (bv.bv_len > first_prp_len) + iod->cmd.common.dptr.prp2 = + cpu_to_le64(dma_addr + first_prp_len); + } - cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); - if (bv->bv_len > first_prp_len) - cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); - else - cmnd->dptr.prp2 = 0; return BLK_STS_OK; } -static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, - struct request *req, struct nvme_rw_command *cmnd, - struct bio_vec *bv) +static blk_status_t nvme_map_data(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct nvme_dev *dev = nvmeq->dev; + enum nvme_use_sgl use_sgl = nvme_pci_use_sgls(dev, req); + struct blk_dma_iter iter; + blk_status_t ret; + + /* + * Try to skip the DMA iterator for single segment requests, as that + * significantly improves performances for small I/O sizes. 
+ */ + if (blk_rq_nr_phys_segments(req) == 1) { + ret = nvme_pci_setup_data_simple(req, use_sgl); + if (ret != BLK_STS_AGAIN) + return ret; + } + + if (!blk_rq_dma_map_iter_start(req, dev->dev, &iod->dma_state, &iter)) + return iter.status; - iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); - if (dma_mapping_error(dev->dev, iod->first_dma)) + switch (iter.p2pdma.map) { + case PCI_P2PDMA_MAP_BUS_ADDR: + iod->flags |= IOD_DATA_P2P; + break; + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + iod->flags |= IOD_DATA_MMIO; + break; + case PCI_P2PDMA_MAP_NONE: + break; + default: return BLK_STS_RESOURCE; - iod->dma_len = bv->bv_len; + } - cmnd->flags = NVME_CMD_SGL_METABUF; - cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); - cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); - cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; - return BLK_STS_OK; + if (use_sgl == SGL_FORCED || + (use_sgl == SGL_SUPPORTED && + (sgl_threshold && nvme_pci_avg_seg_size(req) >= sgl_threshold))) + return nvme_pci_setup_data_sgl(req, &iter); + return nvme_pci_setup_data_prp(req, &iter); } -static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, - struct nvme_command *cmnd) +static blk_status_t nvme_pci_setup_meta_iter(struct request *req) { + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + unsigned int entries = req->nr_integrity_segments; struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - blk_status_t ret = BLK_STS_RESOURCE; - int rc; + struct nvme_dev *dev = nvmeq->dev; + struct nvme_sgl_desc *sg_list; + struct blk_dma_iter iter; + dma_addr_t sgl_dma; + int i = 0; - if (blk_rq_nr_phys_segments(req) == 1) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; - struct bio_vec bv = req_bvec(req); - - if (!is_pci_p2pdma_page(bv.bv_page)) { - if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) + - bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) - return nvme_setup_prp_simple(dev, req, - &cmnd->rw, &bv); - - if (nvmeq->qid && sgl_threshold && - nvme_ctrl_sgl_supported(&dev->ctrl)) - return nvme_setup_sgl_simple(dev, req, - &cmnd->rw, &bv); - } + if (!blk_rq_integrity_dma_map_iter_start(req, dev->dev, + &iod->meta_dma_state, &iter)) + return iter.status; + + switch (iter.p2pdma.map) { + case PCI_P2PDMA_MAP_BUS_ADDR: + iod->flags |= IOD_META_P2P; + break; + case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE: + iod->flags |= IOD_META_MMIO; + break; + case PCI_P2PDMA_MAP_NONE: + break; + default: + return BLK_STS_RESOURCE; + } + + if (blk_rq_dma_map_coalesce(&iod->meta_dma_state)) + entries = 1; + + /* + * The NVMe MPTR descriptor has an implicit length that the host and + * device must agree on to avoid data/memory corruption. We trust the + * kernel allocated correctly based on the format's parameters, so use + * the more efficient MPTR to avoid extra dma pool allocations for the + * SGL indirection. + * + * But for user commands, we don't necessarily know what they do, so + * the driver can't validate the metadata buffer size. The SGL + * descriptor provides an explicit length, so we're relying on that + * mechanism to catch any misunderstandings between the application and + * device. + * + * P2P DMA also needs to use the blk_dma_iter method, so mptr setup + * leverages this routine when that happens. 
+ */ + if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) || + (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) { + iod->cmd.common.metadata = cpu_to_le64(iter.addr); + iod->meta_total_len = iter.len; + iod->meta_dma = iter.addr; + iod->meta_descriptor = NULL; + return BLK_STS_OK; } - iod->dma_len = 0; - iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); - if (!iod->sgt.sgl) + sg_list = dma_pool_alloc(nvmeq->descriptor_pools.small, GFP_ATOMIC, + &sgl_dma); + if (!sg_list) return BLK_STS_RESOURCE; - sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); - iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); - if (!iod->sgt.orig_nents) - goto out_free_sg; - rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), - DMA_ATTR_NO_WARN); - if (rc) { - if (rc == -EREMOTEIO) - ret = BLK_STS_TARGET; - goto out_free_sg; + iod->meta_descriptor = sg_list; + iod->meta_dma = sgl_dma; + iod->cmd.common.flags = NVME_CMD_SGL_METASEG; + iod->cmd.common.metadata = cpu_to_le64(sgl_dma); + if (entries == 1) { + iod->meta_total_len = iter.len; + nvme_pci_sgl_set_data(sg_list, &iter); + return BLK_STS_OK; } - if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) - ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); - else - ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); - if (ret != BLK_STS_OK) - goto out_unmap_sg; - return BLK_STS_OK; + sgl_dma += sizeof(*sg_list); + do { + nvme_pci_sgl_set_data(&sg_list[++i], &iter); + iod->meta_total_len += iter.len; + } while (blk_rq_integrity_dma_map_iter_next(req, dev->dev, &iter)); -out_unmap_sg: - dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); -out_free_sg: - mempool_free(iod->sgt.sgl, dev->iod_mempool); - return ret; + nvme_pci_sgl_set_seg(sg_list, sgl_dma, i); + if (unlikely(iter.status)) + nvme_unmap_metadata(req); + return iter.status; } -static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, - struct nvme_command *cmnd) +static blk_status_t nvme_pci_setup_meta_mptr(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + struct bio_vec bv = rq_integrity_vec(req); + + if (is_pci_p2pdma_page(bv.bv_page)) + return nvme_pci_setup_meta_iter(req); - iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), - rq_dma_dir(req), 0); - if (dma_mapping_error(dev->dev, iod->meta_dma)) + iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); + if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma)) return BLK_STS_IOERR; - cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); + iod->cmd.common.metadata = cpu_to_le64(iod->meta_dma); + iod->flags |= IOD_SINGLE_META_SEGMENT; return BLK_STS_OK; } -static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) +static blk_status_t nvme_map_metadata(struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + + if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && + nvme_pci_metadata_use_sgls(req)) + return nvme_pci_setup_meta_iter(req); + return nvme_pci_setup_meta_mptr(req); +} + +static blk_status_t nvme_prep_rq(struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; - iod->aborted = false; - iod->nr_allocations = -1; - iod->sgt.nents = 0; + iod->flags = 0; + iod->nr_descriptors = 0; + iod->total_len = 0; + iod->meta_total_len = 0; ret = nvme_setup_cmd(req->q->queuedata, req); if (ret) return ret; if (blk_rq_nr_phys_segments(req)) { - ret = nvme_map_data(dev, req, &iod->cmd); + ret = nvme_map_data(req); if (ret) goto out_free_cmd; 
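A condensed restatement of the metadata-descriptor choice implemented above; the helper name and parameters are made up for illustration, and P2P metadata additionally routes through the DMA iterator even when it ends up using MPTR.

#include <linux/types.h>

static bool meta_uses_sgl(bool ctrl_supports_meta_sgl,
			  unsigned int nr_integrity_segments, bool user_command)
{
	if (!ctrl_supports_meta_sgl)
		return false;		/* MPTR is the only option */
	if (nr_integrity_segments > 1)
		return true;		/* MPTR cannot describe multiple segments */
	if (user_command)
		return true;		/* explicit SGL length guards user buffers */
	return false;			/* single kernel buffer: efficient MPTR */
}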
} if (blk_integrity_rq(req)) { - ret = nvme_map_metadata(dev, req, &iod->cmd); + ret = nvme_map_metadata(req); if (ret) goto out_unmap_data; } @@ -863,15 +1239,13 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) nvme_start_request(req); return BLK_STS_OK; out_unmap_data: - nvme_unmap_data(dev, req); + if (blk_rq_nr_phys_segments(req)) + nvme_unmap_data(req); out_free_cmd: nvme_cleanup_cmd(req); return ret; } -/* - * NOTE: ns is NULL when called on the admin queue. - */ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -891,7 +1265,7 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) return nvme_fail_nonready_command(&dev->ctrl, req); - ret = nvme_prep_rq(dev, req); + ret = nvme_prep_rq(req); if (unlikely(ret)) return ret; spin_lock(&nvmeq->sq_lock); @@ -901,11 +1275,15 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; } -static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) +static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct rq_list *rqlist) { + struct request *req; + + if (rq_list_empty(rqlist)) + return; + spin_lock(&nvmeq->sq_lock); - while (!rq_list_empty(*rqlist)) { - struct request *req = rq_list_pop(rqlist); + while ((req = rq_list_pop(rqlist))) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_sq_copy_cmd(nvmeq, &iod->cmd); @@ -925,53 +1303,38 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) return false; - return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; + return nvme_prep_rq(req) == BLK_STS_OK; } -static void nvme_queue_rqs(struct request **rqlist) +static void nvme_queue_rqs(struct rq_list *rqlist) { - struct request *req, *next, *prev = NULL; - struct request *requeue_list = NULL; - - rq_list_for_each_safe(rqlist, req, next) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; - - if (!nvme_prep_rq_batch(nvmeq, req)) { - /* detach 'req' and add to remainder list */ - rq_list_move(rqlist, &requeue_list, req, prev); + struct rq_list submit_list = { }; + struct rq_list requeue_list = { }; + struct nvme_queue *nvmeq = NULL; + struct request *req; - req = prev; - if (!req) - continue; - } + while ((req = rq_list_pop(rqlist))) { + if (nvmeq && nvmeq != req->mq_hctx->driver_data) + nvme_submit_cmds(nvmeq, &submit_list); + nvmeq = req->mq_hctx->driver_data; - if (!next || req->mq_hctx != next->mq_hctx) { - /* detach rest of list, and submit */ - req->rq_next = NULL; - nvme_submit_cmds(nvmeq, rqlist); - *rqlist = next; - prev = NULL; - } else - prev = req; + if (nvme_prep_rq_batch(nvmeq, req)) + rq_list_add_tail(&submit_list, req); + else + rq_list_add_tail(&requeue_list, req); } + if (nvmeq) + nvme_submit_cmds(nvmeq, &submit_list); *rqlist = requeue_list; } static __always_inline void nvme_pci_unmap_rq(struct request *req) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; - struct nvme_dev *dev = nvmeq->dev; - - if (blk_integrity_rq(req)) { - struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - - dma_unmap_page(dev->dev, iod->meta_dma, - rq_integrity_vec(req)->bv_len, rq_dma_dir(req)); - } - + if (blk_integrity_rq(req)) + nvme_unmap_metadata(req); if (blk_rq_nr_phys_segments(req)) - nvme_unmap_data(dev, req); + nvme_unmap_data(req); } static void nvme_pci_complete_rq(struct request *req) @@ -1038,8 +1401,9 @@ static inline void nvme_handle_cqe(struct 
nvme_queue *nvmeq, trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); if (!nvme_try_complete_req(req, cqe->status, cqe->result) && - !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, - nvme_pci_complete_batch)) + !blk_mq_add_to_batch(req, iob, + nvme_req(req)->status != NVME_SC_SUCCESS, + nvme_pci_complete_batch)) nvme_pci_complete_rq(req); } @@ -1055,13 +1419,13 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) } } -static inline int nvme_poll_cq(struct nvme_queue *nvmeq, - struct io_comp_batch *iob) +static inline bool nvme_poll_cq(struct nvme_queue *nvmeq, + struct io_comp_batch *iob) { - int found = 0; + bool found = false; while (nvme_cqe_pending(nvmeq)) { - found++; + found = true; /* * load-load control dependency between phase and the rest of * the cqe requires a full read memory barrier @@ -1082,7 +1446,7 @@ static irqreturn_t nvme_irq(int irq, void *data) DEFINE_IO_COMP_BATCH(iob); if (nvme_poll_cq(nvmeq, &iob)) { - if (!rq_list_empty(iob.req_list)) + if (!rq_list_empty(&iob.req_list)) nvme_pci_complete_batch(&iob); return IRQ_HANDLED; } @@ -1109,7 +1473,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); + spin_lock(&nvmeq->cq_poll_lock); nvme_poll_cq(nvmeq, NULL); + spin_unlock(&nvmeq->cq_poll_lock); enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } @@ -1143,6 +1509,41 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) spin_unlock(&nvmeq->sq_lock); } +static int nvme_pci_subsystem_reset(struct nvme_ctrl *ctrl) +{ + struct nvme_dev *dev = to_nvme_dev(ctrl); + int ret = 0; + + /* + * Taking the shutdown_lock ensures the BAR mapping is not being + * altered by reset_work. Holding this lock before the RESETTING state + * change, if successful, also ensures nvme_remove won't be able to + * proceed to iounmap until we're done. + */ + mutex_lock(&dev->shutdown_lock); + if (!dev->bar_mapped_size) { + ret = -ENODEV; + goto unlock; + } + + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) { + ret = -EBUSY; + goto unlock; + } + + writel(NVME_SUBSYS_RESET, dev->bar + NVME_REG_NSSR); + nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE); + + /* + * Read controller status to flush the previous write and trigger a + * pcie read error. + */ + readl(dev->bar + NVME_REG_CSTS); +unlock: + mutex_unlock(&dev->shutdown_lock); + return ret; +} + static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) { struct nvme_command c = { }; @@ -1274,7 +1675,7 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) dev_warn(dev->ctrl.device, "Does your device have a faulty power saving mode enabled?\n"); dev_warn(dev->ctrl.device, - "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); + "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off pcie_port_pm=off\" and report a bug\n"); } static enum blk_eh_timer_return nvme_timeout(struct request *req) @@ -1284,9 +1685,20 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) struct nvme_dev *dev = nvmeq->dev; struct request *abort_req; struct nvme_command cmd = { }; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 csts = readl(dev->bar + NVME_REG_CSTS); u8 opcode; + /* + * Shutdown the device immediately if we see it is disconnected. This + * unblocks PCIe error handling if the nvme driver is waiting in + * error_resume for a device that has been removed. 
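With nvme_reset_subsystem() in nvme.h now delegating to ctrl->ops->subsystem_reset (see the header hunk earlier), the PCI transport presumably wires the helper above into its nvme_ctrl_ops. Roughly, as a sketch only, since the actual ops table assignment is outside the lines shown here:

static const struct nvme_ctrl_ops nvme_pci_ctrl_ops_sketch = {
	.name			= "pcie",	/* illustrative */
	/* ... the existing PCI callbacks ... */
	.subsystem_reset	= nvme_pci_subsystem_reset,
};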
We can't unbind the + * driver while the driver's error callback is waiting to complete, so + * we're relying on a timeout to break that deadlock if a removal + * occurs while reset work is running. + */ + if (pci_dev_is_disconnected(pdev)) + nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); if (nvme_state_terminal(&dev->ctrl)) goto disable; @@ -1294,7 +1706,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * the recovery mechanism will surely fail. */ mb(); - if (pci_channel_offline(to_pci_dev(dev->dev))) + if (pci_channel_offline(pdev)) return BLK_EH_RESET_TIMER; /* @@ -1349,7 +1761,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) * returned to the driver, or if this is the admin queue. */ opcode = nvme_req(req)->cmd->common.opcode; - if (!nvmeq->qid || iod->aborted) { + if (!nvmeq->qid || (iod->flags & IOD_ABORTED)) { dev_warn(dev->ctrl.device, "I/O tag %d (%04x) opcode %#x (%s) QID %d timeout, reset controller\n", req->tag, nvme_cid(req), opcode, @@ -1362,7 +1774,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) atomic_inc(&dev->ctrl.abort_limit); return BLK_EH_RESET_TIMER; } - iod->aborted = true; + iod->flags |= IOD_ABORTED; cmd.abort.opcode = nvme_admin_abort_cmd; cmd.abort.cid = nvme_cid(req); @@ -1747,8 +2159,28 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) * might be pointing at! */ result = nvme_disable_ctrl(&dev->ctrl, false); - if (result < 0) - return result; + if (result < 0) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + + /* + * The NVMe Controller Reset method did not get an expected + * CSTS.RDY transition, so something with the device appears to + * be stuck. Use the lower level and bigger hammer PCIe + * Function Level Reset to attempt restoring the device to its + * initial state, and try again. + */ + result = pcie_reset_flr(pdev, false); + if (result < 0) + return result; + + pci_restore_state(pdev); + result = nvme_disable_ctrl(&dev->ctrl, false); + if (result < 0) + return result; + + dev_info(dev->ctrl.device, + "controller reset completed after pcie flr\n"); + } result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); if (result) @@ -1856,6 +2288,18 @@ static void nvme_map_cmb(struct nvme_dev *dev) return; /* + * Controllers may support a CMB size larger than their BAR, for + * example, due to being behind a bridge. Reduce the CMB to the + * reported size of the BAR + */ + size = min(size, bar_size - offset); + + if (!IS_ALIGNED(size, memremap_compat_align()) || + !IS_ALIGNED(pci_resource_start(pdev, bar), + memremap_compat_align())) + return; + + /* * Tell the controller about the host side address mapping the CMB, * and enable CMB decoding for the NVMe 1.4+ scheme: */ @@ -1865,17 +2309,10 @@ static void nvme_map_cmb(struct nvme_dev *dev) dev->bar + NVME_REG_CMBMSC); } - /* - * Controllers may support a CMB size larger than their BAR, - * for example, due to being behind a bridge. 
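The CMB hunk above clamps the controller-reported size to the BAR window and then rejects anything that is not aligned to the p2pdma granularity. A self-contained toy of that arithmetic; the 2 MiB alignment value and the use of the window offset instead of the BAR start address are assumptions made purely so the example runs in userspace:

#include <stdint.h>
#include <stdio.h>

/* Clamp a CMB window to what the BAR exposes and reject unaligned layouts,
 * mirroring the size = min(size, bar_size - offset) + IS_ALIGNED checks. */
static uint64_t clamp_cmb(uint64_t size, uint64_t bar_size, uint64_t offset,
			  uint64_t align)
{
	if (offset >= bar_size)
		return 0;
	if (size > bar_size - offset)
		size = bar_size - offset;	/* reduce to the BAR window */
	if ((size | offset) & (align - 1))
		return 0;			/* unaligned: skip the CMB */
	return size;
}

int main(void)
{
	/* 1 GiB claimed, 256 MiB BAR, 2 MiB granularity -> 256 MiB usable */
	printf("%llu MiB usable\n", (unsigned long long)
	       (clamp_cmb(1ULL << 30, 1ULL << 28, 0, 2ULL << 20) >> 20));
	return 0;
}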
Reduce the CMB to - * the reported size of the BAR - */ - if (size > bar_size - offset) - size = bar_size - offset; - if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { dev_warn(dev->ctrl.device, "failed to register the CMB\n"); + hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); return; } @@ -1885,8 +2322,6 @@ static void nvme_map_cmb(struct nvme_dev *dev) if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) pci_p2pmem_publish(pdev, true); - - nvme_update_attrs(dev); } static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) @@ -1915,7 +2350,7 @@ static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) return ret; } -static void nvme_free_host_mem(struct nvme_dev *dev) +static void nvme_free_host_mem_multi(struct nvme_dev *dev) { int i; @@ -1930,18 +2365,54 @@ static void nvme_free_host_mem(struct nvme_dev *dev) kfree(dev->host_mem_desc_bufs); dev->host_mem_desc_bufs = NULL; - dma_free_coherent(dev->dev, - dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), +} + +static void nvme_free_host_mem(struct nvme_dev *dev) +{ + if (dev->hmb_sgt) + dma_free_noncontiguous(dev->dev, dev->host_mem_size, + dev->hmb_sgt, DMA_BIDIRECTIONAL); + else + nvme_free_host_mem_multi(dev); + + dma_free_coherent(dev->dev, dev->host_mem_descs_size, dev->host_mem_descs, dev->host_mem_descs_dma); dev->host_mem_descs = NULL; + dev->host_mem_descs_size = 0; dev->nr_host_mem_descs = 0; } -static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, +static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size) +{ + dev->hmb_sgt = dma_alloc_noncontiguous(dev->dev, size, + DMA_BIDIRECTIONAL, GFP_KERNEL, 0); + if (!dev->hmb_sgt) + return -ENOMEM; + + dev->host_mem_descs = dma_alloc_coherent(dev->dev, + sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma, + GFP_KERNEL); + if (!dev->host_mem_descs) { + dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt, + DMA_BIDIRECTIONAL); + dev->hmb_sgt = NULL; + return -ENOMEM; + } + dev->host_mem_size = size; + dev->host_mem_descs_size = sizeof(*dev->host_mem_descs); + dev->nr_host_mem_descs = 1; + + dev->host_mem_descs[0].addr = + cpu_to_le64(dev->hmb_sgt->sgl->dma_address); + dev->host_mem_descs[0].size = cpu_to_le32(size / NVME_CTRL_PAGE_SIZE); + return 0; +} + +static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred, u32 chunk_size) { struct nvme_host_mem_buf_desc *descs; - u32 max_entries, len; + u32 max_entries, len, descs_size; dma_addr_t descs_dma; int i = 0; void **bufs; @@ -1954,8 +2425,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) max_entries = dev->ctrl.hmmaxd; - descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), - &descs_dma, GFP_KERNEL); + descs_size = max_entries * sizeof(*descs); + descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma, + GFP_KERNEL); if (!descs) goto out; @@ -1984,22 +2456,14 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, dev->host_mem_size = size; dev->host_mem_descs = descs; dev->host_mem_descs_dma = descs_dma; + dev->host_mem_descs_size = descs_size; dev->host_mem_desc_bufs = bufs; return 0; out_free_bufs: - while (--i >= 0) { - size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; - - dma_free_attrs(dev->dev, size, bufs[i], - le64_to_cpu(descs[i].addr), - DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); - } - kfree(bufs); out_free_descs: - dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, - descs_dma); + 
dma_free_coherent(dev->dev, descs_size, descs, descs_dma); out: dev->host_mem_descs = NULL; return -ENOMEM; @@ -2007,13 +2471,23 @@ out: static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) { + unsigned long dma_merge_boundary = dma_get_merge_boundary(dev->dev); u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); u64 chunk_size; + /* + * If there is an IOMMU that can merge pages, try a virtually + * non-contiguous allocation for a single segment first. + */ + if (dma_merge_boundary && (PAGE_SIZE & dma_merge_boundary) == 0) { + if (!nvme_alloc_host_mem_single(dev, preferred)) + return 0; + } + /* start big and work our way down */ for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { - if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { + if (!nvme_alloc_host_mem_multi(dev, preferred, chunk_size)) { if (!min || dev->host_mem_size >= min) return 0; nvme_free_host_mem(dev); @@ -2061,8 +2535,10 @@ static int nvme_setup_host_mem(struct nvme_dev *dev) } dev_info(dev->ctrl.device, - "allocated %lld MiB host memory buffer.\n", - dev->host_mem_size >> ilog2(SZ_1M)); + "allocated %lld MiB host memory buffer (%u segment%s).\n", + dev->host_mem_size >> ilog2(SZ_1M), + dev->nr_host_mem_descs, + str_plural(dev->nr_host_mem_descs)); } ret = nvme_set_host_mem(dev, enable_bits); @@ -2076,7 +2552,7 @@ static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, { struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); - return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", + return sysfs_emit(buf, "cmbloc : 0x%08x\ncmbsz : 0x%08x\n", ndev->cmbloc, ndev->cmbsz); } static DEVICE_ATTR_RO(cmb); @@ -2263,7 +2739,8 @@ static unsigned int nvme_max_io_queues(struct nvme_dev *dev) */ if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) return 1; - return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; + return blk_mq_num_possible_queues(0) + dev->nr_write_queues + + dev->nr_poll_queues; } static int nvme_setup_io_queues(struct nvme_dev *dev) @@ -2470,11 +2947,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) return 1; } -static void nvme_pci_update_nr_queues(struct nvme_dev *dev) +static bool nvme_pci_update_nr_queues(struct nvme_dev *dev) { + if (!dev->ctrl.tagset) { + nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, + nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); + return true; + } + + /* Give up if we are racing with nvme_dev_disable() */ + if (!mutex_trylock(&dev->shutdown_lock)) + return false; + + /* Check if nvme_dev_disable() has been executed already */ + if (!dev->online_queues) { + mutex_unlock(&dev->shutdown_lock); + return false; + } + blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); /* free previously allocated queues that are no longer usable */ nvme_free_queues(dev, dev->online_queues); + mutex_unlock(&dev->shutdown_lock); + return true; } static int nvme_pci_enable(struct nvme_dev *dev) @@ -2489,6 +2984,7 @@ static int nvme_pci_enable(struct nvme_dev *dev) pci_set_master(pdev); if (readl(dev->bar + NVME_REG_CSTS) == -1) { + dev_dbg(dev->ctrl.device, "reading CSTS register failed\n"); result = -ENODEV; goto disable; } @@ -2521,15 +3017,8 @@ static int nvme_pci_enable(struct nvme_dev *dev) else dev->io_sqes = NVME_NVM_IOSQES; - /* - * Temporary fix for the Apple controller found in the MacBook8,1 and - * some MacBook7,1 to avoid controller resets and data loss. 
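The multi-descriptor path keeps the original "start big and work our way down" strategy. The loop below models only that sizing policy: try_alloc() and its 8 MiB success threshold are stand-ins invented for the example, whereas the real code allocates DMA chunks and checks the accumulated buffer against the controller's minimum.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool try_alloc(uint64_t chunk)		/* stand-in for the allocator */
{
	return chunk <= (8ULL << 20);		/* pretend small chunks succeed */
}

int main(void)
{
	uint64_t preferred = 256ULL << 20;	/* what the controller asked for */
	uint64_t hmminds = 4ULL << 20;		/* controller minimum chunk */
	uint64_t chunk;

	for (chunk = preferred; chunk >= hmminds; chunk /= 2) {
		if (try_alloc(chunk)) {
			printf("using %llu MiB chunks\n",
			       (unsigned long long)(chunk >> 20));
			return 0;
		}
	}
	printf("host memory buffer disabled\n");
	return 0;
}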
- */ - if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { + if (dev->ctrl.quirks & NVME_QUIRK_QDEPTH_ONE) { dev->q_depth = 2; - dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " - "set queue depth=%u to work around controller resets\n", - dev->q_depth); } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && (pdev->device == 0xa821 || pdev->device == 0xa822) && NVME_CAP_MQES(dev->ctrl.cap) == 0) { @@ -2644,39 +3133,15 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) return 0; } -static int nvme_setup_prp_pools(struct nvme_dev *dev) -{ - dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, - NVME_CTRL_PAGE_SIZE, - NVME_CTRL_PAGE_SIZE, 0); - if (!dev->prp_page_pool) - return -ENOMEM; - - /* Optimisation for I/Os between 4k and 128k */ - dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, - 256, 256, 0); - if (!dev->prp_small_pool) { - dma_pool_destroy(dev->prp_page_pool); - return -ENOMEM; - } - return 0; -} - -static void nvme_release_prp_pools(struct nvme_dev *dev) -{ - dma_pool_destroy(dev->prp_page_pool); - dma_pool_destroy(dev->prp_small_pool); -} - static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) { - size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; + size_t alloc_size = sizeof(struct nvme_dma_vec) * NVME_MAX_SEGS; - dev->iod_mempool = mempool_create_node(1, + dev->dmavec_mempool = mempool_create_node(1, mempool_kmalloc, mempool_kfree, (void *)alloc_size, GFP_KERNEL, dev_to_node(dev->dev)); - if (!dev->iod_mempool) + if (!dev->dmavec_mempool) return -ENOMEM; return 0; } @@ -2743,18 +3208,25 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; + if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) + dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; + else + dev->ctrl.max_integrity_segments = 1; + nvme_dbbuf_dma_alloc(dev); result = nvme_setup_host_mem(dev); if (result < 0) goto out; + nvme_update_attrs(dev); + result = nvme_setup_io_queues(dev); if (result) goto out; /* - * Freeze and update the number of I/O queues as thos might have + * Freeze and update the number of I/O queues as those might have * changed. If there are no I/O queues left after this reset, keep the * controller around but remove all namespaces. 
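The queue-count update now has to cope with a concurrent nvme_dev_disable(); the shape of that guard, condensed from the hunk earlier in this file with the queue-freeing step elided (illustrative only):

/* Resize the I/O tagset only if we can take shutdown_lock without blocking
 * and the queues have not already been torn down underneath us. */
static bool update_nr_queues_sketch(struct nvme_dev *dev)
{
	if (!mutex_trylock(&dev->shutdown_lock))
		return false;			/* racing with nvme_dev_disable() */
	if (!dev->online_queues) {
		mutex_unlock(&dev->shutdown_lock);
		return false;			/* already disabled */
	}
	blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
	mutex_unlock(&dev->shutdown_lock);
	return true;
}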
*/ @@ -2762,7 +3234,8 @@ static void nvme_reset_work(struct work_struct *work) nvme_dbbuf_set(dev); nvme_unquiesce_io_queues(&dev->ctrl); nvme_wait_freeze(&dev->ctrl); - nvme_pci_update_nr_queues(dev); + if (!nvme_pci_update_nr_queues(dev)) + goto out; nvme_unfreeze(&dev->ctrl); } else { dev_warn(dev->ctrl.device, "IO queues lost\n"); @@ -2849,6 +3322,14 @@ static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) return dma_pci_p2pdma_supported(dev->dev); } +static unsigned long nvme_pci_get_virt_boundary(struct nvme_ctrl *ctrl, + bool is_admin) +{ + if (!nvme_ctrl_sgl_supported(ctrl) || is_admin) + return NVME_CTRL_PAGE_SIZE - 1; + return 0; +} + static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .name = "pcie", .module = THIS_MODULE, @@ -2859,9 +3340,11 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .reg_read64 = nvme_pci_reg_read64, .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, + .subsystem_reset = nvme_pci_subsystem_reset, .get_address = nvme_pci_get_address, .print_device_info = nvme_pci_print_device_info, .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, + .get_virt_boundary = nvme_pci_get_virt_boundary, }; static int nvme_dev_map(struct nvme_dev *dev) @@ -2923,15 +3406,37 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) /* * Exclude some Kingston NV1 and A2000 devices from * NVME_QUIRK_SIMPLE_SUSPEND. Do a full suspend to save a - * lot fo energy with s2idle sleep on some TUXEDO platforms. + * lot of energy with s2idle sleep on some TUXEDO platforms. */ if (dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || dmi_match(DMI_BOARD_NAME, "NS5x_7xAU") || dmi_match(DMI_BOARD_NAME, "NS5x_7xPU") || dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1")) return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; + } else if (pdev->vendor == 0x144d && pdev->device == 0xa80d) { + /* + * Exclude Samsung 990 Evo from NVME_QUIRK_SIMPLE_SUSPEND + * because of high power consumption (> 2 Watt) in s2idle + * sleep. Only some boards with Intel CPU are affected. + * (Note for testing: Samsung 990 Evo Plus has same PCI ID) + */ + if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") || + dmi_match(DMI_BOARD_NAME, "GMxPXxx") || + dmi_match(DMI_BOARD_NAME, "GXxMRXx") || + dmi_match(DMI_BOARD_NAME, "NS5X_NS7XAU") || + dmi_match(DMI_BOARD_NAME, "PH4PG31") || + dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") || + dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71")) + return NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND; } + /* + * NVMe SSD drops off the PCIe bus after system idle + * for 10 hours on a Lenovo N60z board. + */ + if (dmi_match(DMI_BOARD_NAME, "LXKT-ZXEG-N6")) + return NVME_QUIRK_NO_APST; + return 0; } @@ -2943,7 +3448,8 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, struct nvme_dev *dev; int ret = -ENOMEM; - dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); + dev = kzalloc_node(struct_size(dev, descriptor_pools, nr_node_ids), + GFP_KERNEL, node); if (!dev) return ERR_PTR(-ENOMEM); INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); @@ -2988,13 +3494,9 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, * over a single page. */ dev->ctrl.max_hw_sectors = min_t(u32, - NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9); + NVME_MAX_BYTES >> SECTOR_SHIFT, + dma_opt_mapping_size(&pdev->dev) >> 9); dev->ctrl.max_segments = NVME_MAX_SEGS; - - /* - * There is no support for SGLs for metadata (yet), so we are limited to - * a single integrity segment for the separate metadata pointer. 
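The new ->get_virt_boundary callback only matters because of how the block layer uses the mask: two bio segments may stay in one request only when there is no gap relative to that boundary. A userspace toy of the gap rule, with a 4 KiB NVME_CTRL_PAGE_SIZE assumed and the helper name invented for the example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A gap exists (forcing a split) unless the previous segment ends on the
 * boundary and the next one starts on it. */
static bool gap_between(uint64_t prev_end, uint64_t next_start, uint64_t mask)
{
	return ((prev_end | next_start) & mask) != 0;
}

int main(void)
{
	uint64_t mask = 4096 - 1;	/* NVME_CTRL_PAGE_SIZE - 1 */

	printf("%d\n", gap_between(0x11000, 0x20000, mask));	/* 0: mergeable */
	printf("%d\n", gap_between(0x11200, 0x20000, mask));	/* 1: must split */
	return 0;
}

Returning 0 as the mask, as the SGL-capable I/O path now does, simply tells the block layer that no such splits are needed.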
- */ dev->ctrl.max_integrity_segments = 1; return dev; @@ -3015,17 +3517,17 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (IS_ERR(dev)) return PTR_ERR(dev); - result = nvme_dev_map(dev); + result = nvme_add_ctrl(&dev->ctrl); if (result) - goto out_uninit_ctrl; + goto out_put_ctrl; - result = nvme_setup_prp_pools(dev); + result = nvme_dev_map(dev); if (result) - goto out_dev_unmap; + goto out_uninit_ctrl; result = nvme_pci_alloc_iod_mempool(dev); if (result) - goto out_release_prp_pools; + goto out_dev_unmap; dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); @@ -3053,12 +3555,19 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto out_disable; + if (nvme_ctrl_meta_sgl_supported(&dev->ctrl)) + dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS; + else + dev->ctrl.max_integrity_segments = 1; + nvme_dbbuf_dma_alloc(dev); result = nvme_setup_host_mem(dev); if (result < 0) goto out_disable; + nvme_update_attrs(dev); + result = nvme_setup_io_queues(dev); if (result) goto out_disable; @@ -3094,14 +3603,14 @@ out_disable: nvme_dbbuf_dma_free(dev); nvme_free_queues(dev, 0); out_release_iod_mempool: - mempool_destroy(dev->iod_mempool); -out_release_prp_pools: - nvme_release_prp_pools(dev); + mempool_destroy(dev->dmavec_mempool); out_dev_unmap: nvme_dev_unmap(dev); out_uninit_ctrl: nvme_uninit_ctrl(&dev->ctrl); +out_put_ctrl: nvme_put_ctrl(&dev->ctrl); + dev_err_probe(&pdev->dev, result, "probe failed\n"); return result; } @@ -3158,8 +3667,8 @@ static void nvme_remove(struct pci_dev *pdev) nvme_dev_remove_admin(dev); nvme_dbbuf_dma_free(dev); nvme_free_queues(dev, 0); - mempool_destroy(dev->iod_mempool); - nvme_release_prp_pools(dev); + mempool_destroy(dev->dmavec_mempool); + nvme_release_descriptor_pools(dev); nvme_dev_unmap(dev); nvme_uninit_ctrl(&dev->ctrl); } @@ -3328,7 +3837,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) dev_info(dev->ctrl.device, "restart after slot reset\n"); pci_restore_state(pdev); - if (!nvme_try_sched_reset(&dev->ctrl)) + if (nvme_try_sched_reset(&dev->ctrl)) nvme_unquiesce_io_queues(&dev->ctrl); return PCI_ERS_RESULT_RECOVERED; } @@ -3357,12 +3866,10 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ .driver_data = NVME_QUIRK_STRIPE_SIZE | - NVME_QUIRK_DEALLOCATE_ZEROES | NVME_QUIRK_IGNORE_DEV_SUBNQN | NVME_QUIRK_BOGUS_NID, }, { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ - .driver_data = NVME_QUIRK_STRIPE_SIZE | - NVME_QUIRK_DEALLOCATE_ZEROES, }, + .driver_data = NVME_QUIRK_STRIPE_SIZE, }, { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_MEDIUM_PRIO_SQ | @@ -3376,6 +3883,11 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_BOGUS_NID, }, { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */ + .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, }, + { PCI_DEVICE(0x126f, 0x1001), /* Silicon Motion generic */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_BOGUS_NID, }, @@ -3399,6 +3911,9 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x15b7, 0x5008), /* Sandisk SN530 */ 
.driver_data = NVME_QUIRK_BROKEN_MSI }, + { PCI_DEVICE(0x15b7, 0x5009), /* Sandisk SN550 */ + .driver_data = NVME_QUIRK_BROKEN_MSI | + NVME_QUIRK_NO_DEEPEST_PS }, { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ @@ -3476,12 +3991,16 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ @@ -3510,7 +4029,12 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), - .driver_data = NVME_QUIRK_SINGLE_VECTOR }, + /* + * Fix for the Apple controller found in the MacBook8,1 and + * some MacBook7,1 to avoid controller resets and data loss. + */ + .driver_data = NVME_QUIRK_SINGLE_VECTOR | + NVME_QUIRK_QDEPTH_ONE }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), .driver_data = NVME_QUIRK_SINGLE_VECTOR | @@ -3545,9 +4069,6 @@ static int __init nvme_init(void) BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); - BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE); - BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE); - BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS); return pci_register_driver(&nvme_driver); } diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c index 8fa1ffcdaed4..ad2ecc2f49a9 100644 --- a/drivers/nvme/host/pr.c +++ b/drivers/nvme/host/pr.c @@ -5,7 +5,7 @@ */ #include <linux/blkdev.h> #include <linux/pr.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "nvme.h" @@ -72,18 +72,16 @@ static int nvme_send_ns_pr_command(struct nvme_ns *ns, struct nvme_command *c, return nvme_submit_sync_cmd(ns->queue, c, data, data_len); } -static int nvme_sc_to_pr_err(int nvme_sc) +static int nvme_status_to_pr_err(int status) { - if (nvme_is_path_error(nvme_sc)) + if (nvme_is_path_error(status)) return PR_STS_PATH_FAILED; - switch (nvme_sc & 0x7ff) { + switch (status & NVME_SCT_SC_MASK) { case NVME_SC_SUCCESS: return PR_STS_SUCCESS; case NVME_SC_RESERVATION_CONFLICT: return PR_STS_RESERVATION_CONFLICT; - case NVME_SC_ONCS_NOT_SUPPORTED: - return -EOPNOTSUPP; case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_INVALID_OPCODE: case NVME_SC_INVALID_FIELD: @@ -94,115 +92,144 @@ static int nvme_sc_to_pr_err(int nvme_sc) } } -static int nvme_send_pr_command(struct block_device *bdev, - struct nvme_command *c, void *data, unsigned int data_len) +static int __nvme_send_pr_command(struct block_device *bdev, u32 cdw10, + u32 cdw11, u8 op, void *data, unsigned int data_len) { - if (nvme_disk_is_ns_head(bdev->bd_disk)) - return 
nvme_send_ns_head_pr_command(bdev, c, data, data_len); + struct nvme_command c = { 0 }; + + c.common.opcode = op; + c.common.cdw10 = cpu_to_le32(cdw10); + c.common.cdw11 = cpu_to_le32(cdw11); - return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data, - data_len); + if (nvme_disk_is_ns_head(bdev->bd_disk)) + return nvme_send_ns_head_pr_command(bdev, &c, data, data_len); + return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c, + data, data_len); } -static int nvme_pr_command(struct block_device *bdev, u32 cdw10, - u64 key, u64 sa_key, u8 op) +static int nvme_send_pr_command(struct block_device *bdev, u32 cdw10, u32 cdw11, + u8 op, void *data, unsigned int data_len) { - struct nvme_command c = { }; - u8 data[16] = { 0, }; int ret; - put_unaligned_le64(key, &data[0]); - put_unaligned_le64(sa_key, &data[8]); - - c.common.opcode = op; - c.common.cdw10 = cpu_to_le32(cdw10); - - ret = nvme_send_pr_command(bdev, &c, data, sizeof(data)); - if (ret < 0) - return ret; - - return nvme_sc_to_pr_err(ret); + ret = __nvme_send_pr_command(bdev, cdw10, cdw11, op, data, data_len); + return ret < 0 ? ret : nvme_status_to_pr_err(ret); } -static int nvme_pr_register(struct block_device *bdev, u64 old, - u64 new, unsigned flags) +static int nvme_pr_register(struct block_device *bdev, u64 old_key, u64 new_key, + unsigned int flags) { + struct nvmet_pr_register_data data = { 0 }; u32 cdw10; if (flags & ~PR_FL_IGNORE_KEY) return -EOPNOTSUPP; - cdw10 = old ? 2 : 0; - cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0; - cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */ - return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register); + data.crkey = cpu_to_le64(old_key); + data.nrkey = cpu_to_le64(new_key); + + cdw10 = old_key ? NVME_PR_REGISTER_ACT_REPLACE : + NVME_PR_REGISTER_ACT_REG; + cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0; + cdw10 |= NVME_PR_CPTPL_PERSIST; + + return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_register, + &data, sizeof(data)); } static int nvme_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type, unsigned flags) { + struct nvmet_pr_acquire_data data = { 0 }; u32 cdw10; if (flags & ~PR_FL_IGNORE_KEY) return -EOPNOTSUPP; - cdw10 = nvme_pr_type_from_blk(type) << 8; - cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0); - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire); + data.crkey = cpu_to_le64(key); + + cdw10 = NVME_PR_ACQUIRE_ACT_ACQUIRE; + cdw10 |= nvme_pr_type_from_blk(type) << 8; + cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0; + + return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire, + &data, sizeof(data)); } static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new, enum pr_type type, bool abort) { - u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1); + struct nvmet_pr_acquire_data data = { 0 }; + u32 cdw10; + + data.crkey = cpu_to_le64(old); + data.prkey = cpu_to_le64(new); - return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire); + cdw10 = abort ? NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT : + NVME_PR_ACQUIRE_ACT_PREEMPT; + cdw10 |= nvme_pr_type_from_blk(type) << 8; + + return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire, + &data, sizeof(data)); } static int nvme_pr_clear(struct block_device *bdev, u64 key) { - u32 cdw10 = 1 | (key ? 0 : 1 << 3); + struct nvmet_pr_release_data data = { 0 }; + u32 cdw10; + + data.crkey = cpu_to_le64(key); + + cdw10 = NVME_PR_RELEASE_ACT_CLEAR; + cdw10 |= key ? 
0 : NVME_PR_IGNORE_KEY; - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); + return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release, + &data, sizeof(data)); } static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type) { - u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3); + struct nvmet_pr_release_data data = { 0 }; + u32 cdw10; + + data.crkey = cpu_to_le64(key); - return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release); + cdw10 = NVME_PR_RELEASE_ACT_RELEASE; + cdw10 |= nvme_pr_type_from_blk(type) << 8; + cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY; + + return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release, + &data, sizeof(data)); } static int nvme_pr_resv_report(struct block_device *bdev, void *data, u32 data_len, bool *eds) { - struct nvme_command c = { }; + u32 cdw10, cdw11; int ret; - c.common.opcode = nvme_cmd_resv_report; - c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len)); - c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT); + cdw10 = nvme_bytes_to_numd(data_len); + cdw11 = NVME_EXTENDED_DATA_STRUCT; *eds = true; retry: - ret = nvme_send_pr_command(bdev, &c, data, data_len); + ret = __nvme_send_pr_command(bdev, cdw10, cdw11, nvme_cmd_resv_report, + data, data_len); if (ret == NVME_SC_HOST_ID_INCONSIST && - c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) { - c.common.cdw11 = 0; + cdw11 == NVME_EXTENDED_DATA_STRUCT) { + cdw11 = 0; *eds = false; goto retry; } - if (ret < 0) - return ret; - - return nvme_sc_to_pr_err(ret); + return ret < 0 ? ret : nvme_status_to_pr_err(ret); } static int nvme_pr_read_keys(struct block_device *bdev, struct pr_keys *keys_info) { - u32 rse_len, num_keys = keys_info->num_keys; + size_t rse_len; + u32 num_keys = keys_info->num_keys; struct nvme_reservation_status_ext *rse; int ret, i; bool eds; @@ -212,6 +239,9 @@ static int nvme_pr_read_keys(struct block_device *bdev, * enough to get enough keys to fill the return keys buffer. */ rse_len = struct_size(rse, regctl_eds, num_keys); + if (rse_len > U32_MAX) + return -EINVAL; + rse = kzalloc(rse_len, GFP_KERNEL); if (!rse) return -ENOMEM; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 51a62b0c645a..35c0822edb2d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -18,7 +18,7 @@ #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/nvme.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> @@ -221,7 +221,7 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev, /* * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue - * lifetime. It's safe, since any chage in the underlying RDMA device + * lifetime. It's safe, since any change in the underlying RDMA device * will issue error recovery and queue re-creation. */ for (i = 0; i < ib_queue_size; i++) { @@ -800,7 +800,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, /* * Bind the async event SQE DMA mapping to the admin queue lifetime. - * It's safe, since any chage in the underlying RDMA device will issue + * It's safe, since any change in the underlying RDMA device will issue * error recovery and queue re-creation. 
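Going back to the reservation refactor above: the register path now builds cdw10 from named action/flag fields and carries the keys in a little-endian payload rather than packing them by hand. A standalone illustration of that encoding; the numeric values mirror the old open-coded constants visible in the hunk, and the "persist through power loss" policy is assumed for the example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t crkey = 0x1111222233334444ULL;	/* current (old) key */
	uint64_t nrkey = 0xaaaabbbbccccddddULL;	/* new key to register */
	uint8_t data[16];
	uint32_t cdw10 = 0;
	int i;

	cdw10 |= 2;		/* action: replace existing key */
	cdw10 |= 1u << 3;	/* ignore existing key */
	cdw10 |= 3u << 30;	/* change PTPL: persist through power loss */

	/* payload layout: crkey then nrkey, both little-endian 64-bit */
	for (i = 0; i < 8; i++) {
		data[i] = crkey >> (8 * i);
		data[8 + i] = nrkey >> (8 * i);
	}
	printf("cdw10 = %#x, data[0] = %#x\n", cdw10, data[0]);
	return 0;
}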
*/ error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, @@ -877,7 +877,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) /* * Only start IO queues for which we have allocated the tagset - * and limitted it to the available queues. On reconnects, the + * and limited it to the available queues. On reconnects, the * queue number might have changed. */ nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count); @@ -1019,7 +1019,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) goto destroy_admin; } - if (!(ctrl->ctrl.sgls & (1 << 2))) { + if (!(ctrl->ctrl.sgls & NVME_CTRL_SGLS_KSDBDS)) { ret = -EOPNOTSUPP; dev_err(ctrl->ctrl.device, "Mandatory keyed sgls are not supported!\n"); @@ -1051,7 +1051,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; } - if (ctrl->ctrl.sgls & (1 << 20)) + if (ctrl->ctrl.sgls & NVME_CTRL_SGLS_SAOS) ctrl->use_inline_data = true; if (ctrl->ctrl.queue_count > 1) { @@ -1091,13 +1091,7 @@ destroy_io: } destroy_admin: nvme_stop_keep_alive(&ctrl->ctrl); - nvme_quiesce_admin_queue(&ctrl->ctrl); - blk_sync_queue(ctrl->ctrl.admin_q); - nvme_rdma_stop_queue(&ctrl->queues[0]); - nvme_cancel_admin_tagset(&ctrl->ctrl); - if (new) - nvme_remove_admin_tag_set(&ctrl->ctrl); - nvme_rdma_destroy_admin_queue(ctrl); + nvme_rdma_teardown_admin_queue(ctrl, new); return ret; } @@ -1363,8 +1357,8 @@ static void nvme_rdma_set_sig_domain(struct blk_integrity *bi, if (control & NVME_RW_PRINFO_PRCHK_REF) domain->sig.dif.ref_remap = true; - domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); - domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); + domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat); + domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm); domain->sig.dif.app_escape = true; if (pi_type == NVME_NS_DPS_PI_TYPE3) domain->sig.dif.ref_escape = true; @@ -1482,8 +1476,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, if (ret) return -ENOMEM; - req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, - req->data_sgl.sg_table.sgl); + req->data_sgl.nents = blk_rq_map_sg(rq, req->data_sgl.sg_table.sgl); *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, rq_dma_dir(rq)); @@ -1496,7 +1489,7 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, req->metadata_sgl->sg_table.sgl = (struct scatterlist *)(req->metadata_sgl + 1); ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, - blk_rq_count_integrity_sg(rq->q, rq->bio), + rq->nr_integrity_segments, req->metadata_sgl->sg_table.sgl, NVME_INLINE_METADATA_SG_CNT); if (unlikely(ret)) { @@ -1504,8 +1497,8 @@ static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq, goto out_unmap_sg; } - req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, - rq->bio, req->metadata_sgl->sg_table.sgl); + req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq, + req->metadata_sgl->sg_table.sgl); *pi_count = ib_dma_map_sg(ibdev, req->metadata_sgl->sg_table.sgl, req->metadata_sgl->nents, @@ -1876,6 +1869,8 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue) */ priv.hrqsize = cpu_to_le16(queue->queue_size); priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); + /* cntlid should only be set when creating an I/O queue */ + priv.cntlid = cpu_to_le16(ctrl->ctrl.cntlid); } ret = rdma_connect_locked(queue->cm_id, ¶m); @@ -2201,11 +2196,13 @@ static const struct 
nvme_ctrl_ops nvme_rdma_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_rdma_free_ctrl, .submit_async_event = nvme_rdma_submit_async_event, .delete_ctrl = nvme_rdma_delete_ctrl, .get_address = nvmf_get_address, .stop_ctrl = nvme_rdma_stop_ctrl, + .get_virt_boundary = nvme_get_virt_boundary, }; /* @@ -2237,12 +2234,11 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts) return found; } -static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, +static struct nvme_rdma_ctrl *nvme_rdma_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_rdma_ctrl *ctrl; int ret; - bool changed; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) @@ -2304,6 +2300,30 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, if (ret) goto out_kfree_queues; + return ctrl; + +out_kfree_queues: + kfree(ctrl->queues); +out_free_ctrl: + kfree(ctrl); + return ERR_PTR(ret); +} + +static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, + struct nvmf_ctrl_options *opts) +{ + struct nvme_rdma_ctrl *ctrl; + bool changed; + int ret; + + ctrl = nvme_rdma_alloc_ctrl(dev, opts); + if (IS_ERR(ctrl)) + return ERR_CAST(ctrl); + + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto out_put_ctrl; + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); WARN_ON_ONCE(!changed); @@ -2322,15 +2342,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); -out_kfree_queues: - kfree(ctrl->queues); -out_free_ctrl: - kfree(ctrl); - return ERR_PTR(ret); } static struct nvmf_transport_ops nvme_rdma_transport = { diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c index 3c55f7edd181..29430949ce2f 100644 --- a/drivers/nvme/host/sysfs.c +++ b/drivers/nvme/host/sysfs.c @@ -233,13 +233,12 @@ static ssize_t nuse_show(struct device *dev, struct device_attribute *attr, { struct nvme_ns_head *head = dev_to_ns_head(dev); struct gendisk *disk = dev_to_disk(dev); - struct block_device *bdev = disk->part0; int ret; - if (nvme_disk_is_ns_head(bdev->bd_disk)) + if (nvme_disk_is_ns_head(disk)) ret = ns_head_update_nuse(head); else - ret = ns_update_nuse(bdev->bd_disk->private_data); + ret = ns_update_nuse(disk->private_data); if (ret) return ret; @@ -259,6 +258,9 @@ static struct attribute *nvme_ns_attrs[] = { #ifdef CONFIG_NVME_MULTIPATH &dev_attr_ana_grpid.attr, &dev_attr_ana_state.attr, + &dev_attr_queue_depth.attr, + &dev_attr_numa_nodes.attr, + &dev_attr_delayed_removal_secs.attr, #endif &dev_attr_io_passthru_err_log_enabled.attr, NULL, @@ -291,6 +293,16 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) return 0; } + if (a == &dev_attr_queue_depth.attr || a == &dev_attr_numa_nodes.attr) { + if (nvme_disk_is_ns_head(dev_to_disk(dev))) + return 0; + } + if (a == &dev_attr_delayed_removal_secs.attr) { + struct gendisk *disk = dev_to_disk(dev); + + if (!nvme_disk_is_ns_head(disk)) + return 0; + } #endif return a->mode; } @@ -300,8 +312,50 @@ static const struct attribute_group nvme_ns_attr_group = { .is_visible = nvme_ns_attrs_are_visible, }; +#ifdef CONFIG_NVME_MULTIPATH +/* + * NOTE: The dummy attribute does not appear in sysfs. It exists solely to allow + * control over the visibility of the multipath sysfs node. 
Without at least one + * attribute defined in nvme_ns_mpath_attrs[], the sysfs implementation does not + * invoke the multipath_sysfs_group_visible() method. As a result, we would not + * be able to control the visibility of the multipath sysfs node. + */ +static struct attribute dummy_attr = { + .name = "dummy", +}; + +static struct attribute *nvme_ns_mpath_attrs[] = { + &dummy_attr, + NULL, +}; + +static bool multipath_sysfs_group_visible(struct kobject *kobj) +{ + struct device *dev = container_of(kobj, struct device, kobj); + + return nvme_disk_is_ns_head(dev_to_disk(dev)); +} + +static bool multipath_sysfs_attr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + return false; +} + +DEFINE_SYSFS_GROUP_VISIBLE(multipath_sysfs) + +const struct attribute_group nvme_ns_mpath_attr_group = { + .name = "multipath", + .attrs = nvme_ns_mpath_attrs, + .is_visible = SYSFS_GROUP_VISIBLE(multipath_sysfs), +}; +#endif + const struct attribute_group *nvme_ns_attr_groups[] = { &nvme_ns_attr_group, +#ifdef CONFIG_NVME_MULTIPATH + &nvme_ns_mpath_attr_group, +#endif NULL, }; @@ -665,19 +719,6 @@ static DEVICE_ATTR(dhchap_ctrl_secret, S_IRUGO | S_IWUSR, nvme_ctrl_dhchap_ctrl_secret_show, nvme_ctrl_dhchap_ctrl_secret_store); #endif -#ifdef CONFIG_NVME_TCP_TLS -static ssize_t tls_key_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct nvme_ctrl *ctrl = dev_get_drvdata(dev); - - if (!ctrl->tls_key) - return 0; - return sysfs_emit(buf, "%08x", key_serial(ctrl->tls_key)); -} -static DEVICE_ATTR_RO(tls_key); -#endif - static struct attribute *nvme_dev_attrs[] = { &dev_attr_reset_controller.attr, &dev_attr_rescan_controller.attr, @@ -705,9 +746,6 @@ static struct attribute *nvme_dev_attrs[] = { &dev_attr_dhchap_secret.attr, &dev_attr_dhchap_ctrl_secret.attr, #endif -#ifdef CONFIG_NVME_TCP_TLS - &dev_attr_tls_key.attr, -#endif &dev_attr_adm_passthru_err_log_enabled.attr, NULL }; @@ -738,11 +776,6 @@ static umode_t nvme_dev_attrs_are_visible(struct kobject *kobj, if (a == &dev_attr_dhchap_ctrl_secret.attr && !ctrl->opts) return 0; #endif -#ifdef CONFIG_NVME_TCP_TLS - if (a == &dev_attr_tls_key.attr && - (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp"))) - return 0; -#endif return a->mode; } @@ -753,8 +786,78 @@ const struct attribute_group nvme_dev_attrs_group = { }; EXPORT_SYMBOL_GPL(nvme_dev_attrs_group); +#ifdef CONFIG_NVME_TCP_TLS +static ssize_t tls_key_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (!ctrl->tls_pskid) + return 0; + return sysfs_emit(buf, "%08x\n", ctrl->tls_pskid); +} +static DEVICE_ATTR_RO(tls_key); + +static ssize_t tls_configured_key_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct key *key = ctrl->opts->tls_key; + + return sysfs_emit(buf, "%08x\n", key_serial(key)); +} +static DEVICE_ATTR_RO(tls_configured_key); + +static ssize_t tls_keyring_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + struct key *keyring = ctrl->opts->keyring; + + return sysfs_emit(buf, "%s\n", keyring->description); +} +static DEVICE_ATTR_RO(tls_keyring); + +static struct attribute *nvme_tls_attrs[] = { + &dev_attr_tls_key.attr, + &dev_attr_tls_configured_key.attr, + &dev_attr_tls_keyring.attr, + NULL, +}; + +static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj, + struct attribute *a, int n) +{ + struct device *dev = 
container_of(kobj, struct device, kobj); + struct nvme_ctrl *ctrl = dev_get_drvdata(dev); + + if (!ctrl->opts || strcmp(ctrl->opts->transport, "tcp")) + return 0; + + if (a == &dev_attr_tls_key.attr && + !ctrl->opts->tls && !ctrl->opts->concat) + return 0; + if (a == &dev_attr_tls_configured_key.attr && + (!ctrl->opts->tls_key || ctrl->opts->concat)) + return 0; + if (a == &dev_attr_tls_keyring.attr && + !ctrl->opts->keyring) + return 0; + + return a->mode; +} + +static const struct attribute_group nvme_tls_attrs_group = { + .attrs = nvme_tls_attrs, + .is_visible = nvme_tls_attrs_are_visible, +}; +#endif + const struct attribute_group *nvme_dev_attr_groups[] = { &nvme_dev_attrs_group, +#ifdef CONFIG_NVME_TCP_TLS + &nvme_tls_attrs_group, +#endif NULL, }; diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 8b5e4327fe83..69cb04406b47 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -8,7 +8,7 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/err.h> -#include <linux/key.h> +#include <linux/crc32.h> #include <linux/nvme-tcp.h> #include <linux/nvme-keyring.h> #include <net/sock.h> @@ -17,7 +17,6 @@ #include <net/tls_prot.h> #include <net/handshake.h> #include <linux/blk-mq.h> -#include <crypto/hash.h> #include <net/busy_poll.h> #include <trace/events/sock.h> @@ -54,6 +53,8 @@ MODULE_PARM_DESC(tls_handshake_timeout, "nvme TLS handshake timeout in seconds (default 10)"); #endif +static atomic_t nvme_tcp_cpu_queues[NR_CPUS]; + #ifdef CONFIG_DEBUG_LOCK_ALLOC /* lockdep can detect a circular dependency of the form * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock @@ -127,6 +128,7 @@ enum nvme_tcp_queue_flags { NVME_TCP_Q_ALLOCATED = 0, NVME_TCP_Q_LIVE = 1, NVME_TCP_Q_POLLING = 2, + NVME_TCP_Q_IO_CPU_SET = 3, }; enum nvme_tcp_recv_state { @@ -165,8 +167,9 @@ struct nvme_tcp_queue { bool hdr_digest; bool data_digest; - struct ahash_request *rcv_hash; - struct ahash_request *snd_hash; + bool tls_enabled; + u32 rcv_crc; + u32 snd_crc; __le32 exp_ddgst; __le32 recv_ddgst; struct completion tls_complete; @@ -213,12 +216,39 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) return queue - queue->ctrl->queues; } -static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl) +static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type) +{ + switch (type) { + case nvme_tcp_c2h_term: + case nvme_tcp_c2h_data: + case nvme_tcp_r2t: + case nvme_tcp_rsp: + return true; + default: + return false; + } +} + +/* + * Check if the queue is TLS encrypted + */ +static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) +{ + if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) + return 0; + + return queue->tls_enabled; +} + +/* + * Check if TLS is configured for the controller. 
+ */ +static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl) { if (!IS_ENABLED(CONFIG_NVME_TCP_TLS)) return 0; - return ctrl->opts->tls; + return ctrl->opts->tls || ctrl->opts->concat; } static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) @@ -368,12 +398,12 @@ static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue) static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue) { - return !nvme_tcp_tls(&queue->ctrl->ctrl) && + return !nvme_tcp_queue_tls(queue) && nvme_tcp_queue_has_pending(queue); } static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, - bool sync, bool last) + bool last) { struct nvme_tcp_queue *queue = req->queue; bool empty; @@ -387,7 +417,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req, * are on the same cpu, so we don't introduce contention. */ if (queue->io_cpu == raw_smp_processor_id() && - sync && empty && mutex_trylock(&queue->send_mutex)) { + empty && mutex_trylock(&queue->send_mutex)) { nvme_tcp_send_all(queue); mutex_unlock(&queue->send_mutex); } @@ -422,36 +452,43 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue) return NULL; } - list_del(&req->entry); + list_del_init(&req->entry); + init_llist_node(&req->lentry); return req; } -static inline void nvme_tcp_ddgst_final(struct ahash_request *hash, - __le32 *dgst) +#define NVME_TCP_CRC_SEED (~0) + +static inline void nvme_tcp_ddgst_update(u32 *crcp, + struct page *page, size_t off, size_t len) { - ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0); - crypto_ahash_final(hash); + page += off / PAGE_SIZE; + off %= PAGE_SIZE; + while (len) { + const void *vaddr = kmap_local_page(page); + size_t n = min(len, (size_t)PAGE_SIZE - off); + + *crcp = crc32c(*crcp, vaddr + off, n); + kunmap_local(vaddr); + page++; + off = 0; + len -= n; + } } -static inline void nvme_tcp_ddgst_update(struct ahash_request *hash, - struct page *page, off_t off, size_t len) +static inline __le32 nvme_tcp_ddgst_final(u32 crc) { - struct scatterlist sg; - - sg_init_table(&sg, 1); - sg_set_page(&sg, page, len, off); - ahash_request_set_crypt(hash, &sg, NULL, len); - crypto_ahash_update(hash); + return cpu_to_le32(~crc); } -static inline void nvme_tcp_hdgst(struct ahash_request *hash, - void *pdu, size_t len) +static inline __le32 nvme_tcp_hdgst(const void *pdu, size_t len) { - struct scatterlist sg; + return cpu_to_le32(~crc32c(NVME_TCP_CRC_SEED, pdu, len)); +} - sg_init_one(&sg, pdu, len); - ahash_request_set_crypt(hash, &sg, pdu + len, len); - crypto_ahash_digest(hash); +static inline void nvme_tcp_set_hdgst(void *pdu, size_t len) +{ + *(__le32 *)(pdu + len) = nvme_tcp_hdgst(pdu, len); } static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, @@ -469,8 +506,7 @@ static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue, } recv_digest = *(__le32 *)(pdu + hdr->hlen); - nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); - exp_digest = *(__le32 *)(pdu + hdr->hlen); + exp_digest = nvme_tcp_hdgst(pdu, pdu_len); if (recv_digest != exp_digest) { dev_err(queue->ctrl->ctrl.device, "header digest error: recv %#x expected %#x\n", @@ -496,7 +532,7 @@ static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu) nvme_tcp_queue_id(queue)); return -EPROTO; } - crypto_ahash_init(queue->rcv_hash); + queue->rcv_crc = NVME_TCP_CRC_SEED; return 0; } @@ -530,6 +566,8 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set, req->queue = queue; nvme_req(rq)->ctrl = &ctrl->ctrl; nvme_req(rq)->cmd = &pdu->cmd; + 
init_llist_node(&req->lentry); + INIT_LIST_HEAD(&req->entry); return 0; } @@ -734,17 +772,61 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return -EPROTO; } + if (llist_on_list(&req->lentry) || + !list_empty(&req->entry)) { + dev_err(queue->ctrl->ctrl.device, + "req %d unexpected r2t while processing request\n", + rq->tag); + return -EPROTO; + } + req->pdu_len = 0; req->h2cdata_left = r2t_length; req->h2cdata_offset = r2t_offset; req->ttag = pdu->ttag; nvme_tcp_setup_h2c_data_pdu(req); - nvme_tcp_queue_request(req, false, true); + + llist_add(&req->lentry, &queue->req_list); + queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); return 0; } +static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue, + struct nvme_tcp_term_pdu *pdu) +{ + u16 fes; + const char *msg; + u32 plen = le32_to_cpu(pdu->hdr.plen); + + static const char * const msg_table[] = { + [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field", + [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error", + [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error", + [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range", + [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded", + [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter", + }; + + if (plen < NVME_TCP_MIN_C2HTERM_PLEN || + plen > NVME_TCP_MAX_C2HTERM_PLEN) { + dev_err(queue->ctrl->ctrl.device, + "Received a malformed C2HTermReq PDU (plen = %u)\n", + plen); + return; + } + + fes = le16_to_cpu(pdu->fes); + if (fes && fes < ARRAY_SIZE(msg_table)) + msg = msg_table[fes]; + else + msg = "Unknown"; + + dev_err(queue->ctrl->ctrl.device, + "Received C2HTermReq (FES = %s)\n", msg); +} + static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { @@ -766,6 +848,25 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, return 0; hdr = queue->pdu; + if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) { + if (!nvme_tcp_recv_pdu_supported(hdr->type)) + goto unsupported_pdu; + + dev_err(queue->ctrl->ctrl.device, + "pdu type %d has unexpected header length (%d)\n", + hdr->type, hdr->hlen); + return -EPROTO; + } + + if (unlikely(hdr->type == nvme_tcp_c2h_term)) { + /* + * C2HTermReq never includes Header or Data digests. + * Skip the checks. 
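Both digests in this driver are now plain CRC32C: seed with ~0, fold in the bytes, and put the bitwise NOT of the running value on the wire in little-endian form. The standalone example below reproduces that scheme with a bit-at-a-time Castagnoli CRC purely for illustration; the kernel side uses the crc32c() library routine and whatever acceleration it provides.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC32C (polynomial 0x82F63B78), no inversion applied inside,
 * matching how the running value is carried between updates. */
static uint32_t crc32c_update(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int k;

	while (len--) {
		crc ^= *p++;
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char payload[] = "123456789";
	uint32_t crc = ~0u;			/* NVME_TCP_CRC_SEED */

	crc = crc32c_update(crc, payload, strlen(payload));
	printf("data digest on the wire: 0x%08x\n", ~crc);	/* 0xe3069283 */
	return 0;
}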
+ */ + nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); + return -EINVAL; + } + if (queue->hdr_digest) { ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); if (unlikely(ret)) @@ -789,10 +890,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, nvme_tcp_init_recv_ctx(queue); return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); default: - dev_err(queue->ctrl->ctrl.device, - "unsupported pdu type (%d)\n", hdr->type); - return -EINVAL; + goto unsupported_pdu; } + +unsupported_pdu: + dev_err(queue->ctrl->ctrl.device, + "unsupported pdu type (%d)\n", hdr->type); + return -EINVAL; } static inline void nvme_tcp_end_request(struct request *rq, u16 status) @@ -840,8 +944,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, iov_iter_count(&req->iter)); if (queue->data_digest) - ret = skb_copy_and_hash_datagram_iter(skb, *offset, - &req->iter, recv_len, queue->rcv_hash); + ret = skb_copy_and_crc32c_datagram_iter(skb, *offset, + &req->iter, recv_len, &queue->rcv_crc); else ret = skb_copy_datagram_iter(skb, *offset, &req->iter, recv_len); @@ -859,7 +963,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, if (!queue->data_remaining) { if (queue->data_digest) { - nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); + queue->exp_ddgst = nvme_tcp_ddgst_final(queue->rcv_crc); queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; } else { if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { @@ -977,6 +1081,9 @@ static void nvme_tcp_write_space(struct sock *sk) queue = sk->sk_user_data; if (likely(queue && sk_stream_is_writeable(sk))) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + /* Ensure pending TLS partial records are retried */ + if (nvme_tcp_queue_tls(queue)) + queue->write_space(sk); queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } read_unlock_bh(&sk->sk_callback_lock); @@ -1051,7 +1158,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) else msg.msg_flags |= MSG_MORE; - if (!sendpage_ok(page)) + if (!sendpages_ok(page, len, offset)) msg.msg_flags &= ~MSG_SPLICE_PAGES; bvec_set_page(&bvec, page, len, offset); @@ -1061,7 +1168,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) return ret; if (queue->data_digest) - nvme_tcp_ddgst_update(queue->snd_hash, page, + nvme_tcp_ddgst_update(&queue->snd_crc, page, offset, ret); /* @@ -1075,8 +1182,8 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) /* fully successful last send in current PDU */ if (last && ret == len) { if (queue->data_digest) { - nvme_tcp_ddgst_final(queue->snd_hash, - &req->ddgst); + req->ddgst = + nvme_tcp_ddgst_final(queue->snd_crc); req->state = NVME_TCP_SEND_DDGST; req->offset = 0; } else { @@ -1108,7 +1215,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) msg.msg_flags |= MSG_EOR; if (queue->hdr_digest && !req->offset) - nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvme_tcp_set_hdgst(pdu, sizeof(*pdu)); bvec_set_virt(&bvec, (void *)pdu + req->offset, len); iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); @@ -1121,7 +1228,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) if (inline_data) { req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) - crypto_ahash_init(queue->snd_hash); + queue->snd_crc = NVME_TCP_CRC_SEED; } else { nvme_tcp_done_send_req(queue); } @@ -1143,7 +1250,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) int ret; if (queue->hdr_digest && !req->offset) - 
nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvme_tcp_set_hdgst(pdu, sizeof(*pdu)); if (!req->h2cdata_left) msg.msg_flags |= MSG_SPLICE_PAGES; @@ -1158,7 +1265,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) if (!len) { req->state = NVME_TCP_SEND_DATA; if (queue->data_digest) - crypto_ahash_init(queue->snd_hash); + queue->snd_crc = NVME_TCP_CRC_SEED; return 1; } req->offset += ret; @@ -1262,7 +1369,7 @@ static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue) queue->nr_cqe = 0; consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); release_sock(sk); - return consumed; + return consumed == -EAGAIN ? 0 : consumed; } static void nvme_tcp_io_work(struct work_struct *w) @@ -1290,6 +1397,11 @@ static void nvme_tcp_io_work(struct work_struct *w) else if (unlikely(result < 0)) return; + /* did we get some space after spending time in recv? */ + if (nvme_tcp_queue_has_pending(queue) && + sk_stream_is_writeable(queue->sock->sk)) + pending = true; + if (!pending || !queue->rd_enabled) return; @@ -1298,41 +1410,6 @@ static void nvme_tcp_io_work(struct work_struct *w) queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); } -static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); - - ahash_request_free(queue->rcv_hash); - ahash_request_free(queue->snd_hash); - crypto_free_ahash(tfm); -} - -static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue) -{ - struct crypto_ahash *tfm; - - tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->snd_hash) - goto free_tfm; - ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); - - queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->rcv_hash) - goto free_snd_hash; - ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); - - return 0; -free_snd_hash: - ahash_request_free(queue->snd_hash); -free_tfm: - crypto_free_ahash(tfm); - return -ENOMEM; -} - static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) { struct nvme_tcp_request *async = &ctrl->async_req; @@ -1365,9 +1442,6 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; - if (queue->hdr_digest || queue->data_digest) - nvme_tcp_free_crypto(queue); - page_frag_cache_drain(&queue->pf_cache); noreclaim_flag = memalloc_noreclaim_save(); @@ -1427,19 +1501,22 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) memset(&msg, 0, sizeof(msg)); iov.iov_base = icresp; iov.iov_len = sizeof(*icresp); - if (nvme_tcp_tls(&queue->ctrl->ctrl)) { + if (nvme_tcp_queue_tls(queue)) { msg.msg_control = cbuf; msg.msg_controllen = sizeof(cbuf); } + msg.msg_flags = MSG_WAITALL; ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, iov.iov_len, msg.msg_flags); + if (ret >= 0 && ret < sizeof(*icresp)) + ret = -ECONNRESET; if (ret < 0) { pr_warn("queue %d: failed to receive icresp, error %d\n", nvme_tcp_queue_id(queue), ret); goto free_icresp; } ret = -ENOTCONN; - if (nvme_tcp_tls(&queue->ctrl->ctrl)) { + if (nvme_tcp_queue_tls(queue)) { ctype = tls_get_record_type(queue->sock->sk, (struct cmsghdr *)cbuf); if (ctype != TLS_RECORD_TYPE_DATA) { @@ -1547,23 +1624,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) ctrl->io_queues[HCTX_TYPE_POLL]; } +/* + * Track the number of queues assigned to each cpu using a global per-cpu + * counter and 
select the least used cpu from the mq_map. Our goal is to spread + * different controllers I/O threads across different cpu cores. + * + * Note that the accounting is not 100% perfect, but we don't need to be, we're + * simply putting our best effort to select the best candidate cpu core that we + * find at any given point. + */ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) { struct nvme_tcp_ctrl *ctrl = queue->ctrl; - int qid = nvme_tcp_queue_id(queue); - int n = 0; + struct blk_mq_tag_set *set = &ctrl->tag_set; + int qid = nvme_tcp_queue_id(queue) - 1; + unsigned int *mq_map = NULL; + int cpu, min_queues = INT_MAX, io_cpu; + + if (wq_unbound) + goto out; if (nvme_tcp_default_queue(queue)) - n = qid - 1; + mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map; else if (nvme_tcp_read_queue(queue)) - n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; + mq_map = set->map[HCTX_TYPE_READ].mq_map; else if (nvme_tcp_poll_queue(queue)) - n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - - ctrl->io_queues[HCTX_TYPE_READ] - 1; - if (wq_unbound) - queue->io_cpu = WORK_CPU_UNBOUND; - else - queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); + mq_map = set->map[HCTX_TYPE_POLL].mq_map; + + if (WARN_ON(!mq_map)) + goto out; + + /* Search for the least used cpu from the mq_map */ + io_cpu = WORK_CPU_UNBOUND; + for_each_online_cpu(cpu) { + int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]); + + if (mq_map[cpu] != qid) + continue; + if (num_queues < min_queues) { + io_cpu = cpu; + min_queues = num_queues; + } + } + if (io_cpu != WORK_CPU_UNBOUND) { + queue->io_cpu = io_cpu; + atomic_inc(&nvme_tcp_cpu_queues[io_cpu]); + set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags); + } +out: + dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n", + qid, queue->io_cpu); } static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid) @@ -1581,13 +1691,16 @@ static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid) goto out_complete; } - tls_key = key_lookup(pskid); + tls_key = nvme_tls_key_lookup(pskid); if (IS_ERR(tls_key)) { dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n", qid, pskid); queue->tls_err = -ENOKEY; } else { - ctrl->ctrl.tls_key = tls_key; + queue->tls_enabled = true; + if (qid == 0) + ctrl->ctrl.tls_pskid = key_serial(tls_key); + key_put(tls_key); queue->tls_err = 0; } @@ -1635,9 +1748,14 @@ static int nvme_tcp_start_tls(struct nvme_ctrl *nctrl, qid, ret); tls_handshake_cancel(queue->sock->sk); } else { - dev_dbg(nctrl->device, - "queue %d: TLS handshake complete, error %d\n", - qid, queue->tls_err); + if (queue->tls_err) { + dev_err(nctrl->device, + "queue %d: TLS handshake complete, error %d\n", + qid, queue->tls_err); + } else { + dev_dbg(nctrl->device, + "queue %d: TLS handshake complete\n", qid); + } ret = queue->tls_err; } return ret; @@ -1664,7 +1782,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, queue->cmnd_capsule_len = sizeof(struct nvme_command) + NVME_TCP_ADMIN_CCSZ; - ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, + ret = sock_create_kern(current->nsproxy->net_ns, + ctrl->addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &queue->sock); if (ret) { dev_err(nctrl->device, @@ -1677,6 +1796,8 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, ret = PTR_ERR(sock_file); goto err_destroy_mutex; } + + sk_net_refcnt_upgrade(queue->sock->sk); nvme_tcp_reclassify_socket(queue->sock); /* Single syn retry */ @@ -1704,7 +1825,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, 
queue->sock->sk->sk_allocation = GFP_ATOMIC; queue->sock->sk->sk_use_task_frag = false; - nvme_tcp_set_queue_io_cpu(queue); + queue->io_cpu = WORK_CPU_UNBOUND; queue->request = NULL; queue->data_remaining = 0; queue->ddgst_remaining = 0; @@ -1713,7 +1834,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, sk_set_memalloc(queue->sock->sk); if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { - ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, + ret = kernel_bind(queue->sock, (struct sockaddr_unsized *)&ctrl->src_addr, sizeof(ctrl->src_addr)); if (ret) { dev_err(nctrl->device, @@ -1739,27 +1860,19 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, queue->hdr_digest = nctrl->opts->hdr_digest; queue->data_digest = nctrl->opts->data_digest; - if (queue->hdr_digest || queue->data_digest) { - ret = nvme_tcp_alloc_crypto(queue); - if (ret) { - dev_err(nctrl->device, - "failed to allocate queue %d crypto\n", qid); - goto err_sock; - } - } rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) + nvme_tcp_hdgst_len(queue); queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); if (!queue->pdu) { ret = -ENOMEM; - goto err_crypto; + goto err_sock; } dev_dbg(nctrl->device, "connecting queue %d\n", nvme_tcp_queue_id(queue)); - ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, + ret = kernel_connect(queue->sock, (struct sockaddr_unsized *)&ctrl->addr, sizeof(ctrl->addr), 0); if (ret) { dev_err(nctrl->device, @@ -1768,7 +1881,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, } /* If PSKs are configured try to start TLS */ - if (IS_ENABLED(CONFIG_NVME_TCP_TLS) && pskid) { + if (nvme_tcp_tls_configured(nctrl) && pskid) { ret = nvme_tcp_start_tls(nctrl, queue, pskid); if (ret) goto err_init_connect; @@ -1786,9 +1899,6 @@ err_init_connect: kernel_sock_shutdown(queue->sock, SHUT_RDWR); err_rcv_pdu: kfree(queue->pdu); -err_crypto: - if (queue->hdr_digest || queue->data_digest) - nvme_tcp_free_crypto(queue); err_sock: /* ->sock will be released by fput() */ fput(queue->sock->file); @@ -1818,7 +1928,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) cancel_work_sync(&queue->io_work); } -static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; @@ -1826,12 +1936,42 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; + if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags)) + atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]); + mutex_lock(&queue->queue_lock); if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) __nvme_tcp_stop_queue(queue); + /* Stopping the queue will disable TLS */ + queue->tls_enabled = false; mutex_unlock(&queue->queue_lock); } +static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + int timeout = 100; + + while (timeout > 0) { + if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || + !sk_wmem_alloc_get(queue->sock->sk)) + return; + msleep(2); + timeout -= 2; + } + dev_warn(nctrl->device, + "qid %d: timeout draining sock wmem allocation expired\n", + qid); +} + +static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +{ + nvme_tcp_stop_queue_nowait(nctrl, qid); + nvme_tcp_wait_queue(nctrl, qid); +} + + static void 
nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) { write_lock_bh(&queue->sock->sk->sk_callback_lock); @@ -1858,9 +1998,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) nvme_tcp_init_recv_ctx(queue); nvme_tcp_setup_sock_ops(queue); - if (idx) + if (idx) { + nvme_tcp_set_queue_io_cpu(queue); ret = nvmf_connect_io_queue(nctrl, idx); - else + } else ret = nvmf_connect_admin_queue(nctrl); if (!ret) { @@ -1898,7 +2039,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) int i; for (i = 1; i < ctrl->queue_count; i++) - nvme_tcp_stop_queue(ctrl, i); + nvme_tcp_stop_queue_nowait(ctrl, i); + for (i = 1; i < ctrl->queue_count; i++) + nvme_tcp_wait_queue(ctrl, i); } static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, @@ -1925,16 +2068,17 @@ static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) int ret; key_serial_t pskid = 0; - if (nvme_tcp_tls(ctrl)) { + if (nvme_tcp_tls_configured(ctrl)) { if (ctrl->opts->tls_key) pskid = key_serial(ctrl->opts->tls_key); - else + else if (ctrl->opts->tls) { pskid = nvme_tls_psk_default(ctrl->opts->keyring, ctrl->opts->host->nqn, ctrl->opts->subsysnqn); - if (!pskid) { - dev_err(ctrl->device, "no valid PSK found\n"); - return -ENOKEY; + if (!pskid) { + dev_err(ctrl->device, "no valid PSK found\n"); + return -ENOKEY; + } } } @@ -1957,13 +2101,30 @@ static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) { int i, ret; - if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) { - dev_err(ctrl->device, "no PSK negotiated\n"); - return -ENOKEY; + if (nvme_tcp_tls_configured(ctrl)) { + if (ctrl->opts->concat) { + /* + * The generated PSK is stored in the + * fabric options + */ + if (!ctrl->opts->tls_key) { + dev_err(ctrl->device, "no PSK generated\n"); + return -ENOKEY; + } + if (ctrl->tls_pskid && + ctrl->tls_pskid != key_serial(ctrl->opts->tls_key)) { + dev_err(ctrl->device, "Stale PSK id %08x\n", ctrl->tls_pskid); + ctrl->tls_pskid = 0; + } + } else if (!ctrl->tls_pskid) { + dev_err(ctrl->device, "no PSK negotiated\n"); + return -ENOKEY; + } } + for (i = 1; i < ctrl->queue_count; i++) { ret = nvme_tcp_alloc_queue(ctrl, i, - key_serial(ctrl->tls_key)); + ctrl->tls_pskid); if (ret) goto out_free_queues; } @@ -2002,14 +2163,6 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) return __nvme_tcp_alloc_io_queues(ctrl); } -static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) -{ - nvme_tcp_stop_io_queues(ctrl); - if (remove) - nvme_remove_io_tag_set(ctrl); - nvme_tcp_free_io_queues(ctrl); -} - static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) { int ret, nr_queues; @@ -2029,7 +2182,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) /* * Only start IO queues for which we have allocated the tagset - * and limitted it to the available queues. On reconnects, the + * and limited it to the available queues. On reconnects, the * queue number might have changed. 
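The nvme_tcp_set_queue_io_cpu() rework above balances worker placement with the global nvme_tcp_cpu_queues counters: among the online CPUs that blk-mq maps to this hardware queue it picks the one currently hosting the fewest nvme-tcp queues, falls back to WORK_CPU_UNBOUND when wq_unbound is set or no candidate matches, and the choice is now made from nvme_tcp_start_queue() rather than at allocation time. The following user-space toy model (hypothetical code, plain ints instead of per-cpu atomics, no hotplug or unbound-workqueue handling) shows how the counters spread two controllers with four I/O queues each across eight CPUs:

#include <stdio.h>
#include <limits.h>

#define NR_CPUS 8
#define NR_HWQ  4

/* toy stand-ins for the nvme_tcp_cpu_queues[] counters and the blk-mq
 * cpu -> hardware queue map; two cpus share each hardware queue here */
static int cpu_queues[NR_CPUS];
static int mq_map[NR_CPUS] = { 0, 0, 1, 1, 2, 2, 3, 3 };

/* pick the least loaded cpu among those mapped to hardware queue 'qid',
 * then account for it (the driver uses atomic_inc() on a per-cpu counter) */
static int pick_io_cpu(int qid)
{
        int cpu, best = -1, min_queues = INT_MAX;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (mq_map[cpu] != qid)
                        continue;
                if (cpu_queues[cpu] < min_queues) {
                        best = cpu;
                        min_queues = cpu_queues[cpu];
                }
        }
        if (best >= 0)
                cpu_queues[best]++;
        return best;
}

int main(void)
{
        /* two controllers with four I/O queues each: the second controller's
         * queues land on the cpus the first one left idle */
        for (int ctrl = 0; ctrl < 2; ctrl++)
                for (int qid = 0; qid < NR_HWQ; qid++)
                        printf("ctrl %d queue %d -> cpu %d\n",
                               ctrl, qid, pick_io_cpu(qid));
        return 0;
}

With the 2:1 cpu-to-queue map above, the second controller's queues end up on cpus 1, 3, 5 and 7, exactly the "best effort" spreading the comment describes.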
*/ nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); @@ -2079,14 +2232,6 @@ out_free_io_queues: return ret; } -static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) -{ - nvme_tcp_stop_queue(ctrl, 0); - if (remove) - nvme_remove_admin_tag_set(ctrl); - nvme_tcp_free_admin_queue(ctrl); -} - static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) { int error; @@ -2108,6 +2253,9 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) if (error) goto out_cleanup_tagset; + if (ctrl->opts->concat && !ctrl->tls_pskid) + return 0; + error = nvme_enable_ctrl(ctrl); if (error) goto out_stop_queue; @@ -2141,9 +2289,16 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); nvme_cancel_admin_tagset(ctrl); - if (remove) + if (remove) { nvme_unquiesce_admin_queue(ctrl); - nvme_tcp_destroy_admin_queue(ctrl, remove); + nvme_remove_admin_tag_set(ctrl); + } + nvme_tcp_free_admin_queue(ctrl); + if (ctrl->tls_pskid) { + dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n", + ctrl->tls_pskid); + ctrl->tls_pskid = 0; + } } static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, @@ -2151,14 +2306,15 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, { if (ctrl->queue_count <= 1) return; - nvme_quiesce_admin_queue(ctrl); nvme_quiesce_io_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); nvme_cancel_tagset(ctrl); - if (remove) + if (remove) { nvme_unquiesce_io_queues(ctrl); - nvme_tcp_destroy_io_queues(ctrl, remove); + nvme_remove_io_tag_set(ctrl); + } + nvme_tcp_free_io_queues(ctrl); } static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl, @@ -2184,6 +2340,27 @@ static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl, } } +/* + * The TLS key is set by secure concatenation after negotiation has been + * completed on the admin queue. We need to revoke the key when: + * - concatenation is enabled (otherwise it's a static key set by the user) + * and + * - the generated key is present in ctrl->tls_key (otherwise there's nothing + * to revoke) + * and + * - a valid PSK key ID has been set in ctrl->tls_pskid (otherwise TLS + * negotiation has not run). + * + * We cannot always revoke the key as nvme_tcp_alloc_admin_queue() is called + * twice during secure concatenation, once on a 'normal' connection to run the + * DH-HMAC-CHAP negotiation (which generates the key, so it _must not_ be set), + * and once after the negotiation (which uses the key, so it _must_ be set). 
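The preceding comment explains when the TLS key produced by secure channel concatenation has to be revoked, and nvme_tcp_key_revoke_needed() in the next hunk encodes exactly that condition. A small user-space model of the predicate (hypothetical names, plain bools standing in for the ctrl->opts and ctrl->tls_pskid state) walks the three cases the comment distinguishes:

#include <stdbool.h>
#include <stdio.h>

/* model of nvme_tcp_key_revoke_needed(): revoke only when secure
 * concatenation generated the key *and* TLS actually ran on this controller */
struct ctrl_state {
        bool concat;      /* opts->concat              */
        bool tls_key;     /* opts->tls_key present     */
        bool tls_pskid;   /* negotiated PSK id present */
};

static bool key_revoke_needed(const struct ctrl_state *c)
{
        return c->concat && c->tls_key && c->tls_pskid;
}

int main(void)
{
        /* 1st admin connect of a concat setup: no key generated yet -> keep  */
        struct ctrl_state first  = { true,  false, false };
        /* 2nd admin connect: generated key in use, PSK negotiated  -> revoke */
        struct ctrl_state second = { true,  true,  true  };
        /* static, user-supplied key without concat                 -> keep   */
        struct ctrl_state manual = { false, true,  true  };

        printf("%d %d %d\n", key_revoke_needed(&first),
               key_revoke_needed(&second), key_revoke_needed(&manual));
        return 0;
}

Only the second case evaluates true: the first admin connection of a concatenation run has not generated a key yet, and a user-supplied static key is never revoked.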
+ */ +static bool nvme_tcp_key_revoke_needed(struct nvme_ctrl *ctrl) +{ + return ctrl->opts->concat && ctrl->opts->tls_key && ctrl->tls_pskid; +} + static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) { struct nvmf_ctrl_options *opts = ctrl->opts; @@ -2193,6 +2370,16 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) if (ret) return ret; + if (ctrl->opts->concat && !ctrl->tls_pskid) { + /* See comments for nvme_tcp_key_revoke_needed() */ + dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n"); + nvme_stop_keep_alive(ctrl); + nvme_tcp_teardown_admin_queue(ctrl, false); + ret = nvme_tcp_configure_admin_queue(ctrl, false); + if (ret) + goto destroy_admin; + } + if (ctrl->icdoff) { ret = -EOPNOTSUPP; dev_err(ctrl->device, "icdoff is not supported!\n"); @@ -2247,11 +2434,13 @@ destroy_io: nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); nvme_cancel_tagset(ctrl); - nvme_tcp_destroy_io_queues(ctrl, new); + if (new) + nvme_remove_io_tag_set(ctrl); + nvme_tcp_free_io_queues(ctrl); } destroy_admin: nvme_stop_keep_alive(ctrl); - nvme_tcp_teardown_admin_queue(ctrl, false); + nvme_tcp_teardown_admin_queue(ctrl, new); return ret; } @@ -2287,6 +2476,8 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) struct nvme_tcp_ctrl, err_work); struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; + if (nvme_tcp_key_revoke_needed(ctrl)) + nvme_auth_revoke_tls_key(ctrl); nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); nvme_tcp_teardown_io_queues(ctrl, false); @@ -2327,6 +2518,8 @@ static void nvme_reset_ctrl_work(struct work_struct *work) container_of(work, struct nvme_ctrl, reset_work); int ret; + if (nvme_tcp_key_revoke_needed(ctrl)) + nvme_auth_revoke_tls_key(ctrl); nvme_stop_ctrl(ctrl); nvme_tcp_teardown_ctrl(ctrl, false); @@ -2428,8 +2621,10 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg) ctrl->async_req.offset = 0; ctrl->async_req.curr_bio = NULL; ctrl->async_req.data_len = 0; + init_llist_node(&ctrl->async_req.lentry); + INIT_LIST_HEAD(&ctrl->async_req.entry); - nvme_tcp_queue_request(&ctrl->async_req, true, true); + nvme_tcp_queue_request(&ctrl->async_req, true); } static void nvme_tcp_complete_timed_out(struct request *rq) @@ -2581,7 +2776,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_start_request(rq); - nvme_tcp_queue_request(req, true, bd->last); + nvme_tcp_queue_request(req, bd->last); return BLK_STS_OK; } @@ -2597,6 +2792,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; + int ret; if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) return 0; @@ -2604,9 +2800,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) set_bit(NVME_TCP_Q_POLLING, &queue->flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) sk_busy_loop(sk, true); - nvme_tcp_try_recv(queue); + ret = nvme_tcp_try_recv(queue); clear_bit(NVME_TCP_Q_POLLING, &queue->flags); - return queue->nr_cqe; + return ret < 0 ? 
ret : queue->nr_cqe; } static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) @@ -2617,10 +2813,11 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len = nvmf_get_address(ctrl, buf, size); + if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) + return len; + mutex_lock(&queue->queue_lock); - if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) - goto done; ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); if (ret > 0) { if (len > 0) @@ -2628,7 +2825,7 @@ static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", (len) ? "," : "", &src_addr); } -done: + mutex_unlock(&queue->queue_lock); return len; @@ -2662,11 +2859,13 @@ static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { .reg_read32 = nvmf_reg_read32, .reg_read64 = nvmf_reg_read64, .reg_write32 = nvmf_reg_write32, + .subsystem_reset = nvmf_subsystem_reset, .free_ctrl = nvme_tcp_free_ctrl, .submit_async_event = nvme_tcp_submit_async_event, .delete_ctrl = nvme_tcp_delete_ctrl, .get_address = nvme_tcp_get_address, .stop_ctrl = nvme_tcp_stop_ctrl, + .get_virt_boundary = nvmf_get_virt_boundary, }; static bool @@ -2686,7 +2885,7 @@ nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts) return found; } -static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, +static struct nvme_tcp_ctrl *nvme_tcp_alloc_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) { struct nvme_tcp_ctrl *ctrl; @@ -2761,6 +2960,28 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, if (ret) goto out_kfree_queues; + return ctrl; +out_kfree_queues: + kfree(ctrl->queues); +out_free_ctrl: + kfree(ctrl); + return ERR_PTR(ret); +} + +static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, + struct nvmf_ctrl_options *opts) +{ + struct nvme_tcp_ctrl *ctrl; + int ret; + + ctrl = nvme_tcp_alloc_ctrl(dev, opts); + if (IS_ERR(ctrl)) + return ERR_CAST(ctrl); + + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto out_put_ctrl; + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { WARN_ON_ONCE(1); ret = -EINTR; @@ -2782,15 +3003,11 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); if (ret > 0) ret = -EIO; return ERR_PTR(ret); -out_kfree_queues: - kfree(ctrl->queues); -out_free_ctrl: - kfree(ctrl); - return ERR_PTR(ret); } static struct nvmf_transport_ops nvme_tcp_transport = { @@ -2802,13 +3019,14 @@ static struct nvmf_transport_ops nvme_tcp_transport = { NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST | NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES | NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE | NVMF_OPT_TLS | - NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY, + NVMF_OPT_KEYRING | NVMF_OPT_TLS_KEY | NVMF_OPT_CONCAT, .create_ctrl = nvme_tcp_create_ctrl, }; static int __init nvme_tcp_init_module(void) { unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS; + int cpu; BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8); BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72); @@ -2826,6 +3044,9 @@ static int __init nvme_tcp_init_module(void) if (!nvme_tcp_wq) return -ENOMEM; + for_each_possible_cpu(cpu) + atomic_set(&nvme_tcp_cpu_queues[cpu], 0); + nvmf_register_transport(&nvme_tcp_transport); return 0; } diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c index 0288315f0050..ad25ad1e4041 100644 --- a/drivers/nvme/host/trace.c +++ b/drivers/nvme/host/trace.c @@ -4,7 +4,7 @@ * 
Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "trace.h" static const char *nvme_trace_delete_sq(struct trace_seq *p, u8 *cdw10) @@ -228,27 +228,61 @@ static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10) static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10) { + static const char * const rrega_strs[] = { + [0x00] = "register", + [0x01] = "unregister", + [0x02] = "replace", + }; const char *ret = trace_seq_buffer_ptr(p); u8 rrega = cdw10[0] & 0x7; u8 iekey = (cdw10[0] >> 3) & 0x1; u8 ptpl = (cdw10[3] >> 6) & 0x3; + const char *rrega_str; + + if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega]) + rrega_str = rrega_strs[rrega]; + else + rrega_str = "reserved"; - trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u", - rrega, iekey, ptpl); + trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u", + rrega, rrega_str, iekey, ptpl); trace_seq_putc(p, 0); return ret; } +static const char * const rtype_strs[] = { + [0x00] = "reserved", + [0x01] = "write exclusive", + [0x02] = "exclusive access", + [0x03] = "write exclusive registrants only", + [0x04] = "exclusive access registrants only", + [0x05] = "write exclusive all registrants", + [0x06] = "exclusive access all registrants", +}; + static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10) { + static const char * const racqa_strs[] = { + [0x00] = "acquire", + [0x01] = "preempt", + [0x02] = "preempt and abort", + }; const char *ret = trace_seq_buffer_ptr(p); u8 racqa = cdw10[0] & 0x7; u8 iekey = (cdw10[0] >> 3) & 0x1; u8 rtype = cdw10[1]; + const char *racqa_str = "reserved"; + const char *rtype_str = "reserved"; - trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u", - racqa, iekey, rtype); + if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa]) + racqa_str = racqa_strs[racqa]; + + if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype]) + rtype_str = rtype_strs[rtype]; + + trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s", + racqa, racqa_str, iekey, rtype, rtype_str); trace_seq_putc(p, 0); return ret; @@ -256,13 +290,25 @@ static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10) static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10) { + static const char * const rrela_strs[] = { + [0x00] = "release", + [0x01] = "clear", + }; const char *ret = trace_seq_buffer_ptr(p); u8 rrela = cdw10[0] & 0x7; u8 iekey = (cdw10[0] >> 3) & 0x1; u8 rtype = cdw10[1]; + const char *rrela_str = "reserved"; + const char *rtype_str = "reserved"; + + if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela]) + rrela_str = rrela_strs[rrela]; + + if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype]) + rtype_str = rtype_strs[rtype]; - trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u", - rrela, iekey, rtype); + trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s", + rrela, rrela_str, iekey, rtype, rtype_str); trace_seq_putc(p, 0); return ret; diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c index 77aa0f440a6d..deea2dbef5b8 100644 --- a/drivers/nvme/host/zns.c +++ b/drivers/nvme/host/zns.c @@ -108,13 +108,12 @@ free_data: void nvme_update_zone_info(struct nvme_ns *ns, struct queue_limits *lim, struct nvme_zone_info *zi) { - lim->zoned = 1; + lim->features |= BLK_FEAT_ZONED; lim->max_open_zones = zi->max_open_zones; lim->max_active_zones = zi->max_active_zones; - lim->max_zone_append_sectors = ns->ctrl->max_zone_append; + lim->max_hw_zone_append_sectors = ns->ctrl->max_zone_append; 
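The trace.c hunks above turn the raw reservation fields into value:name pairs; the decode itself is unchanged (racqa/rrega/rrela in bits 2:0 of the first cdw10 byte, iekey in bit 3, rtype in the second byte), only bounds-checked lookup tables with a "reserved" fallback are added. A standalone sketch of the same pattern, using a made-up example dword:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const racqa_strs[] = {
        [0x00] = "acquire",
        [0x01] = "preempt",
        [0x02] = "preempt and abort",
};

static const char * const rtype_strs[] = {
        [0x01] = "write exclusive",
        [0x02] = "exclusive access",
        [0x03] = "write exclusive registrants only",
        [0x04] = "exclusive access registrants only",
        [0x05] = "write exclusive all registrants",
        [0x06] = "exclusive access all registrants",
};

static const char *name_or_reserved(const char * const *tbl, size_t n,
                                    uint8_t v)
{
        return (v < n && tbl[v]) ? tbl[v] : "reserved";
}

int main(void)
{
        uint8_t cdw10[4] = { 0x0a, 0x02, 0x00, 0x00 }; /* example dword */
        uint8_t racqa = cdw10[0] & 0x7;        /* bits 2:0 */
        uint8_t iekey = (cdw10[0] >> 3) & 0x1; /* bit 3    */
        uint8_t rtype = cdw10[1];              /* byte 1   */

        printf("racqa=%u:%s, iekey=%u, rtype=%u:%s\n",
               racqa, name_or_reserved(racqa_strs, ARRAY_SIZE(racqa_strs), racqa),
               iekey,
               rtype, name_or_reserved(rtype_strs, ARRAY_SIZE(rtype_strs), rtype));
        return 0;
}

For cdw10[0] = 0x0a and cdw10[1] = 0x02 this prints "racqa=2:preempt and abort, iekey=1, rtype=2:exclusive access", which is the format the new trace_seq_printf() calls emit.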
lim->chunk_sectors = ns->head->zsze = nvme_lba_to_sect(ns->head, zi->zone_size); - blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, ns->queue); } static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, @@ -147,17 +146,16 @@ static void *nvme_zns_alloc_report_buffer(struct nvme_ns *ns, return NULL; } -static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl, - struct nvme_ns_head *head, +static int nvme_zone_parse_entry(struct nvme_ns *ns, struct nvme_zone_descriptor *entry, - unsigned int idx, report_zones_cb cb, - void *data) + unsigned int idx, + struct blk_report_zones_args *args) { + struct nvme_ns_head *head = ns->head; struct blk_zone zone = { }; if ((entry->zt & 0xf) != NVME_ZONE_TYPE_SEQWRITE_REQ) { - dev_err(ctrl->device, "invalid zone type %#x\n", - entry->zt); + dev_err(ns->ctrl->device, "invalid zone type %#x\n", entry->zt); return -EINVAL; } @@ -171,11 +169,11 @@ static int nvme_zone_parse_entry(struct nvme_ctrl *ctrl, else zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp)); - return cb(&zone, idx, data); + return disk_report_zone(ns->disk, &zone, idx, args); } int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, - unsigned int nr_zones, report_zones_cb cb, void *data) + unsigned int nr_zones, struct blk_report_zones_args *args) { struct nvme_zone_report *report; struct nvme_command c = { }; @@ -214,9 +212,8 @@ int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector, break; for (i = 0; i < nz && zone_idx < nr_zones; i++) { - ret = nvme_zone_parse_entry(ns->ctrl, ns->head, - &report->entries[i], - zone_idx, cb, data); + ret = nvme_zone_parse_entry(ns, &report->entries[i], + zone_idx, args); if (ret) goto out_free; zone_idx++; diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 872dd1a0acd8..4904097dfd49 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -3,10 +3,9 @@ config NVME_TARGET tristate "NVMe Target support" depends on BLOCK - depends on CONFIGFS_FS + select CONFIGFS_FS select NVME_KEYRING if NVME_TARGET_TCP_TLS select KEYS if NVME_TARGET_TCP_TLS - select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY select SGL_ALLOC help This enabled target side support for the NVMe protocol, that is @@ -18,6 +17,15 @@ config NVME_TARGET To configure the NVMe target you probably want to use the nvmetcli tool from http://git.infradead.org/users/hch/nvmetcli.git. +config NVME_TARGET_DEBUGFS + bool "NVMe Target debugfs support" + depends on NVME_TARGET + help + This enables debugfs support to display the connected controllers + to each subsystem + + If unsure, say N. + config NVME_TARGET_PASSTHRU bool "NVMe Target Passthrough support" depends on NVME_TARGET @@ -90,6 +98,7 @@ config NVME_TARGET_TCP_TLS bool "NVMe over Fabrics TCP target TLS encryption support" depends on NVME_TARGET_TCP select NET_HANDSHAKE + select TLS help Enables TLS encryption for the NVMe TCP target using the netlink handshake API. @@ -107,3 +116,14 @@ config NVME_TARGET_AUTH target side. If unsure, say N. + +config NVME_TARGET_PCI_EPF + tristate "NVMe PCI Endpoint Function target support" + depends on NVME_TARGET && PCI_ENDPOINT + depends on NVME_CORE=y || NVME_CORE=NVME_TARGET + help + This enables the NVMe PCI Endpoint Function target driver support, + which allows creating a NVMe PCI controller using an endpoint mode + capable PCI controller. + + If unsure, say N. 
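In the nvme_update_zone_info() change near the top of this chunk, the zone size reported by the device in LBAs becomes both head->zsze and the queue's chunk_sectors, so everything downstream works in 512-byte sectors. A quick worked example of that conversion; it assumes nvme_lba_to_sect() is the usual shift by (lba_shift - 9), which only applies to power-of-two LBA formats:

#include <stdio.h>
#include <stdint.h>

/* user-space model of nvme_lba_to_sect(): LBAs -> 512-byte sectors */
static uint64_t lba_to_sect(uint64_t lba, unsigned int lba_shift)
{
        return lba << (lba_shift - 9);
}

int main(void)
{
        /* e.g. 4 KiB LBA format (lba_shift = 12), 32768-LBA zones */
        uint64_t zsze_lbas = 32768;
        uint64_t chunk_sectors = lba_to_sect(zsze_lbas, 12);

        printf("zone size: %llu LBAs -> %llu sectors (%llu MiB)\n",
               (unsigned long long)zsze_lbas,
               (unsigned long long)chunk_sectors,
               (unsigned long long)(chunk_sectors * 512 >> 20));
        return 0;
}

A 4 KiB LBA format with 32768-LBA zones therefore advertises 262144-sector (128 MiB) zones to the block layer.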
diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile index c66820102493..ed8522911d1f 100644 --- a/drivers/nvme/target/Makefile +++ b/drivers/nvme/target/Makefile @@ -8,9 +8,11 @@ obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o +obj-$(CONFIG_NVME_TARGET_PCI_EPF) += nvmet-pci-epf.o nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \ - discovery.o io-cmd-file.o io-cmd-bdev.o + discovery.o io-cmd-file.o io-cmd-bdev.o pr.o +nvmet-$(CONFIG_NVME_TARGET_DEBUGFS) += debugfs.o nvmet-$(CONFIG_NVME_TARGET_PASSTHRU) += passthru.o nvmet-$(CONFIG_BLK_DEV_ZONED) += zns.o nvmet-$(CONFIG_NVME_TARGET_AUTH) += fabrics-cmd-auth.o auth.o @@ -19,4 +21,5 @@ nvmet-rdma-y += rdma.o nvmet-fc-y += fc.o nvme-fcloop-y += fcloop.o nvmet-tcp-y += tcp.o +nvmet-pci-epf-y += pci-epf.o nvmet-$(CONFIG_TRACING) += trace.o diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index f5b7054a4a05..3da31bb1183e 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -9,9 +9,136 @@ #include <linux/part_stat.h> #include <generated/utsrelease.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "nvmet.h" +static void nvmet_execute_delete_sq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + if (!sqid) { + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + status = nvmet_check_sqid(ctrl, sqid, false); + if (status != NVME_SC_SUCCESS) + goto complete; + + status = ctrl->ops->delete_sq(ctrl, sqid); + +complete: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_create_sq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvme_command *cmd = req->cmd; + u16 sqid = le16_to_cpu(cmd->create_sq.sqid); + u16 cqid = le16_to_cpu(cmd->create_sq.cqid); + u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags); + u16 qsize = le16_to_cpu(cmd->create_sq.qsize); + u64 prp1 = le64_to_cpu(cmd->create_sq.prp1); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + if (!sqid) { + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + status = nvmet_check_sqid(ctrl, sqid, true); + if (status != NVME_SC_SUCCESS) + goto complete; + + status = nvmet_check_io_cqid(ctrl, cqid, false); + if (status != NVME_SC_SUCCESS) { + pr_err("SQ %u: Invalid CQID %u\n", sqid, cqid); + goto complete; + } + + if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) { + status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR; + goto complete; + } + + status = ctrl->ops->create_sq(ctrl, sqid, cqid, sq_flags, qsize, prp1); + +complete: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_delete_cq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + status = nvmet_check_io_cqid(ctrl, cqid, false); + if (status != NVME_SC_SUCCESS) + goto complete; + + if (!ctrl->cqs[cqid] || nvmet_cq_in_use(ctrl->cqs[cqid])) { + /* Some SQs are still using this CQ */ + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + status = 
ctrl->ops->delete_cq(ctrl, cqid); + +complete: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_create_cq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvme_command *cmd = req->cmd; + u16 cqid = le16_to_cpu(cmd->create_cq.cqid); + u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags); + u16 qsize = le16_to_cpu(cmd->create_cq.qsize); + u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector); + u64 prp1 = le64_to_cpu(cmd->create_cq.prp1); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + status = nvmet_check_io_cqid(ctrl, cqid, true); + if (status != NVME_SC_SUCCESS) + goto complete; + + if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) { + status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR; + goto complete; + } + + status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize, + prp1, irq_vector); + +complete: + nvmet_req_complete(req, status); +} + u32 nvmet_get_log_page_len(struct nvme_command *cmd) { u32 len = le16_to_cpu(cmd->get_log_page.numdu); @@ -71,6 +198,35 @@ static void nvmet_execute_get_log_page_error(struct nvmet_req *req) nvmet_req_complete(req, 0); } +static void nvmet_execute_get_supported_log_pages(struct nvmet_req *req) +{ + struct nvme_supported_log *logs; + u16 status; + + logs = kzalloc(sizeof(*logs), GFP_KERNEL); + if (!logs) { + status = NVME_SC_INTERNAL; + goto out; + } + + logs->lids[NVME_LOG_SUPPORTED] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_ERROR] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_SMART] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_FW_SLOT] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_CHANGED_NS] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_CMD_EFFECTS] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_ENDURANCE_GROUP] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_ANA] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_FEATURES] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_RMI] = cpu_to_le32(NVME_LIDS_LSUPP); + logs->lids[NVME_LOG_RESERVATION] = cpu_to_le32(NVME_LIDS_LSUPP); + + status = nvmet_copy_to_sgl(req, 0, logs, sizeof(*logs)); + kfree(logs); +out: + nvmet_req_complete(req, status); +} + static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, struct nvme_smart_log *slog) { @@ -110,7 +266,7 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, unsigned long idx; ctrl = req->sq->ctrl; - xa_for_each(&ctrl->subsys->namespaces, idx, ns) { + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { /* we don't have the right data for file backed ns */ if (!ns->bdev) continue; @@ -130,6 +286,45 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req, return NVME_SC_SUCCESS; } +static void nvmet_execute_get_log_page_rmi(struct nvmet_req *req) +{ + struct nvme_rotational_media_log *log; + struct gendisk *disk; + u16 status; + + req->cmd->common.nsid = cpu_to_le32(le16_to_cpu( + req->cmd->get_log_page.lsi)); + status = nvmet_req_find_ns(req); + if (status) + goto out; + + if (!req->ns->bdev || bdev_nonrot(req->ns->bdev)) { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + if (req->transfer_len != sizeof(*log)) { + status = NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; + goto out; + } + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) + goto out; + + log->endgid = req->cmd->get_log_page.lsi; + disk = req->ns->bdev->bd_disk; + if (disk && disk->ia_ranges) + log->numa = cpu_to_le16(disk->ia_ranges->nr_ia_ranges); + else + log->numa = 
cpu_to_le16(1); + + status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); + kfree(log); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_get_log_page_smart(struct nvmet_req *req) { struct nvme_smart_log *log; @@ -162,8 +357,18 @@ out: nvmet_req_complete(req, status); } -static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) +static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl, + struct nvme_effects_log *log) { + /* For a PCI target controller, advertize support for the . */ + if (nvmet_is_pci_ctrl(ctrl)) { + log->acs[nvme_admin_delete_sq] = + log->acs[nvme_admin_create_sq] = + log->acs[nvme_admin_delete_cq] = + log->acs[nvme_admin_create_cq] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); + } + log->acs[nvme_admin_get_log_page] = log->acs[nvme_admin_identify] = log->acs[nvme_admin_abort_cmd] = @@ -172,10 +377,17 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) log->acs[nvme_admin_async_event] = log->acs[nvme_admin_keep_alive] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); +} +static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) +{ log->iocs[nvme_cmd_read] = log->iocs[nvme_cmd_flush] = log->iocs[nvme_cmd_dsm] = + log->iocs[nvme_cmd_resv_acquire] = + log->iocs[nvme_cmd_resv_register] = + log->iocs[nvme_cmd_resv_release] = + log->iocs[nvme_cmd_resv_report] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); log->iocs[nvme_cmd_write] = log->iocs[nvme_cmd_write_zeroes] = @@ -193,6 +405,7 @@ static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log) static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) { + struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_effects_log *log; u16 status = NVME_SC_SUCCESS; @@ -204,6 +417,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) switch (req->cmd->get_log_page.csi) { case NVME_CSI_NVM: + nvmet_get_cmd_effects_admin(ctrl, log); nvmet_get_cmd_effects_nvm(log); break; case NVME_CSI_ZNS: @@ -211,6 +425,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) status = NVME_SC_INVALID_IO_CMD_SET; goto free; } + nvmet_get_cmd_effects_admin(ctrl, log); nvmet_get_cmd_effects_nvm(log); nvmet_get_cmd_effects_zns(log); break; @@ -259,9 +474,10 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, u32 count = 0; if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { - xa_for_each(&ctrl->subsys->namespaces, idx, ns) + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { if (ns->anagrpid == grpid) desc->nsids[count++] = cpu_to_le32(ns->nsid); + } } desc->grpid = cpu_to_le32(grpid); @@ -272,6 +488,49 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, return struct_size(desc, nsids, count); } +static void nvmet_execute_get_log_page_endgrp(struct nvmet_req *req) +{ + u64 host_reads, host_writes, data_units_read, data_units_written; + struct nvme_endurance_group_log *log; + u16 status; + + /* + * The target driver emulates each endurance group as its own + * namespace, reusing the nsid as the endurance group identifier. 
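The endurance group log handler that follows this comment fills the host read/write counters from the backing block device's part stats and reports data units the same way the SMART log does, in units of 1000 sectors of 512 bytes, rounded up. A small worked example of that conversion (user-space sketch, DIV_ROUND_UP copied from the kernel macro):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* e.g. part_stat_read(bdev, sectors[READ]) returned this many
         * 512-byte sectors since the namespace was set up */
        uint64_t sectors_read = 1234567;

        /* one "data unit" in the log is 1000 sectors (512,000 bytes) */
        uint64_t data_units_read = DIV_ROUND_UP(sectors_read, 1000);

        printf("%llu sectors -> %llu data units (~%llu MB)\n",
               (unsigned long long)sectors_read,
               (unsigned long long)data_units_read,
               (unsigned long long)(data_units_read * 512000 / 1000000));
        return 0;
}

So 1,234,567 sectors read are reported as 1235 data units, roughly 632 MB.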
+ */ + req->cmd->common.nsid = cpu_to_le32(le16_to_cpu( + req->cmd->get_log_page.lsi)); + status = nvmet_req_find_ns(req); + if (status) + goto out; + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) { + status = NVME_SC_INTERNAL; + goto out; + } + + if (!req->ns->bdev) + goto copy; + + host_reads = part_stat_read(req->ns->bdev, ios[READ]); + data_units_read = + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000); + host_writes = part_stat_read(req->ns->bdev, ios[WRITE]); + data_units_written = + DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000); + + put_unaligned_le64(host_reads, &log->hrc[0]); + put_unaligned_le64(data_units_read, &log->dur[0]); + put_unaligned_le64(host_writes, &log->hwc[0]); + put_unaligned_le64(data_units_written, &log->duw[0]); +copy: + status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); + kfree(log); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) { struct nvme_ana_rsp_hdr hdr = { 0, }; @@ -317,12 +576,44 @@ out: nvmet_req_complete(req, status); } +static void nvmet_execute_get_log_page_features(struct nvmet_req *req) +{ + struct nvme_supported_features_log *features; + u16 status; + + features = kzalloc(sizeof(*features), GFP_KERNEL); + if (!features) { + status = NVME_SC_INTERNAL; + goto out; + } + + features->fis[NVME_FEAT_NUM_QUEUES] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_KATO] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_ASYNC_EVENT] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_HOST_ID] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_CSCPE); + features->fis[NVME_FEAT_WRITE_PROTECT] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE); + features->fis[NVME_FEAT_RESV_MASK] = + cpu_to_le32(NVME_FIS_FSUPP | NVME_FIS_NSCPE); + + status = nvmet_copy_to_sgl(req, 0, features, sizeof(*features)); + kfree(features); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_get_log_page(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd))) return; switch (req->cmd->get_log_page.lid) { + case NVME_LOG_SUPPORTED: + return nvmet_execute_get_supported_log_pages(req); case NVME_LOG_ERROR: return nvmet_execute_get_log_page_error(req); case NVME_LOG_SMART: @@ -338,13 +629,21 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) return nvmet_execute_get_log_changed_ns(req); case NVME_LOG_CMD_EFFECTS: return nvmet_execute_get_log_cmd_effects_ns(req); + case NVME_LOG_ENDURANCE_GROUP: + return nvmet_execute_get_log_page_endgrp(req); case NVME_LOG_ANA: return nvmet_execute_get_log_page_ana(req); + case NVME_LOG_FEATURES: + return nvmet_execute_get_log_page_features(req); + case NVME_LOG_RMI: + return nvmet_execute_get_log_page_rmi(req); + case NVME_LOG_RESERVATION: + return nvmet_execute_get_log_page_resv(req); } pr_debug("unhandled lid %d on qid %d\n", req->cmd->get_log_page.lid, req->sq->qid); req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } static void nvmet_execute_identify_ctrl(struct nvmet_req *req) @@ -352,7 +651,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_subsys *subsys = ctrl->subsys; struct nvme_id_ctrl *id; - u32 cmd_capsule_size; + u32 cmd_capsule_size, ctratt; u16 status = 0; if 
(!subsys->subsys_discovered) { @@ -367,9 +666,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) goto out; } - /* XXX: figure out how to assign real vendors IDs. */ - id->vid = 0; - id->ssvid = 0; + id->vid = cpu_to_le16(subsys->vendor_id); + id->ssvid = cpu_to_le16(subsys->subsys_vendor_id); memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE); memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number, @@ -401,14 +699,16 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) /* XXX: figure out what to do about RTD3R/RTD3 */ id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL); - id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT | - NVME_CTRL_ATTR_TBKAS); + ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS; + if (nvmet_is_pci_ctrl(ctrl)) + ctratt |= NVME_CTRL_ATTR_RHII; + id->ctratt = cpu_to_le32(ctratt); id->oacs = 0; /* * We don't really have a practical limit on the number of abort - * comands. But we don't do anything useful for abort either, so + * commands. But we don't do anything useful for abort either, so * no point in allowing more abort commands than the spec requires. */ id->acl = 3; @@ -433,7 +733,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES); id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | - NVME_CTRL_ONCS_WRITE_ZEROES); + NVME_CTRL_ONCS_WRITE_ZEROES | + NVME_CTRL_ONCS_RESERVATIONS); /* XXX: don't report vwc if the underlying device is write through */ id->vwc = NVME_CTRL_VWC_PRESENT; @@ -445,11 +746,12 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->awun = 0; id->awupf = 0; - id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ + /* we always support SGLs */ + id->sgls = cpu_to_le32(NVME_CTRL_SGLS_BYTE_ALIGNED); if (ctrl->ops->flags & NVMF_KEYED_SGLS) - id->sgls |= cpu_to_le32(1 << 2); + id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_KSDBDS); if (req->port->inline_data_size) - id->sgls |= cpu_to_le32(1 << 20); + id->sgls |= cpu_to_le32(NVME_CTRL_SGLS_SAOS); strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); @@ -467,6 +769,13 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->msdbd = ctrl->ops->msdbd; + /* + * Endurance group identifier is 16 bits, so we can't let namespaces + * overflow that since we reuse the nsid + */ + BUILD_BUG_ON(NVMET_MAX_NAMESPACES > USHRT_MAX); + id->endgidmax = cpu_to_le16(NVMET_MAX_NAMESPACES); + id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4); id->anatt = 10; /* random value */ id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS); @@ -496,7 +805,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -551,6 +860,21 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) id->nmic = NVME_NS_NMIC_SHARED; id->anagrpid = cpu_to_le32(req->ns->anagrpid); + if (req->ns->pr.enable) + id->rescap = NVME_PR_SUPPORT_WRITE_EXCLUSIVE | + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS | + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY | + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY | + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS | + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS | + NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF; + + /* + * Since we don't know any better, every namespace is its own endurance + * group. 
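Reusing the namespace ID as the endurance group identifier, as the identify hunks above do for endgid and endgidmax, only works because the endurance group field is 16 bits wide while nsid is 32 bits; the BUILD_BUG_ON() against USHRT_MAX turns that into a compile-time guarantee. The same guard as a self-contained C11 sketch (the 1024 limit is an assumption made for the example, the driver's real NVMET_MAX_NAMESPACES value comes from nvmet.h):

#include <stdint.h>
#include <stdio.h>

#define NVMET_MAX_NAMESPACES 1024  /* assumed value for this example only */

int main(void)
{
        /* endgid is __le16 in the identify data while nsid is 32 bits wide,
         * so reusing nsid as an endurance group id is only valid if the
         * maximum namespace id fits in 16 bits, checked at compile time */
        _Static_assert(NVMET_MAX_NAMESPACES <= UINT16_MAX,
                       "nsid reuse as endgid would overflow 16 bits");

        uint32_t nsid = 42;
        uint16_t endgid = (uint16_t)nsid;

        printf("nsid %u -> endgid %u\n", (unsigned)nsid, (unsigned)endgid);
        return 0;
}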
+ */ + id->endgid = cpu_to_le16(req->ns->nsid); + memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid)); id->lbaf[0].ds = req->ns->blksize_shift; @@ -576,7 +900,40 @@ out: nvmet_req_complete(req, status); } -static void nvmet_execute_identify_nslist(struct nvmet_req *req) +static void nvmet_execute_identify_endgrp_list(struct nvmet_req *req) +{ + u16 min_endgid = le16_to_cpu(req->cmd->identify.cnssid); + static const int buf_size = NVME_IDENTIFY_DATA_SIZE; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_ns *ns; + unsigned long idx; + __le16 *list; + u16 status; + int i = 1; + + list = kzalloc(buf_size, GFP_KERNEL); + if (!list) { + status = NVME_SC_INTERNAL; + goto out; + } + + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { + if (ns->nsid <= min_endgid) + continue; + + list[i++] = cpu_to_le16(ns->nsid); + if (i == buf_size / sizeof(__le16)) + break; + } + + list[0] = cpu_to_le16(i - 1); + status = nvmet_copy_to_sgl(req, 0, list, buf_size); + kfree(list); +out: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_identify_nslist(struct nvmet_req *req, bool match_css) { static const int buf_size = NVME_IDENTIFY_DATA_SIZE; struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -587,15 +944,27 @@ static void nvmet_execute_identify_nslist(struct nvmet_req *req) u16 status = 0; int i = 0; + /* + * NSID values 0xFFFFFFFE and NVME_NSID_ALL are invalid + * See NVMe Base Specification, Active Namespace ID list (CNS 02h). + */ + if (min_nsid == 0xFFFFFFFE || min_nsid == NVME_NSID_ALL) { + req->error_loc = offsetof(struct nvme_identify, nsid); + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; + goto out; + } + list = kzalloc(buf_size, GFP_KERNEL); if (!list) { status = NVME_SC_INTERNAL; goto out; } - xa_for_each(&ctrl->subsys->namespaces, idx, ns) { + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { if (ns->nsid <= min_nsid) continue; + if (match_css && req->ns->csi != req->cmd->identify.csi) + continue; list[i++] = cpu_to_le32(ns->nsid); if (i == buf_size / sizeof(__le32)) break; @@ -662,7 +1031,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req) if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, off) != NVME_IDENTIFY_DATA_SIZE - off) - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; out: nvmet_req_complete(req, status); @@ -675,6 +1044,62 @@ static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req) nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm))); } +static void nvme_execute_identify_ns_nvm(struct nvmet_req *req) +{ + u16 status; + struct nvme_id_ns_nvm *id; + + status = nvmet_req_find_ns(req); + if (status) + goto out; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) { + status = NVME_SC_INTERNAL; + goto out; + } + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + kfree(id); +out: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_id_cs_indep(struct nvmet_req *req) +{ + struct nvme_id_ns_cs_indep *id; + u16 status; + + status = nvmet_req_find_ns(req); + if (status) + goto out; + + id = kzalloc(sizeof(*id), GFP_KERNEL); + if (!id) { + status = NVME_SC_INTERNAL; + goto out; + } + + id->nstat = NVME_NSTAT_NRDY; + id->anagrpid = cpu_to_le32(req->ns->anagrpid); + id->nmic = NVME_NS_NMIC_SHARED; + if (req->ns->readonly) + id->nsattr |= NVME_NS_ATTR_RO; + if (req->ns->bdev && !bdev_nonrot(req->ns->bdev)) + id->nsfeat |= NVME_NS_ROTATIONAL; + /* + * We need flush command to flush the file's metadata, + * so report supporting vwc if backend 
is file, even + * though buffered_io is disable. + */ + if (req->ns->bdev && !bdev_write_cache(req->ns->bdev)) + id->nsfeat |= NVME_NS_VWC_NOT_PRESENT; + + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + kfree(id); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_identify(struct nvmet_req *req) { if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE)) @@ -688,7 +1113,7 @@ static void nvmet_execute_identify(struct nvmet_req *req) nvmet_execute_identify_ctrl(req); return; case NVME_ID_CNS_NS_ACTIVE_LIST: - nvmet_execute_identify_nslist(req); + nvmet_execute_identify_nslist(req, false); return; case NVME_ID_CNS_NS_DESC_LIST: nvmet_execute_identify_desclist(req); @@ -696,8 +1121,8 @@ static void nvmet_execute_identify(struct nvmet_req *req) case NVME_ID_CNS_CS_NS: switch (req->cmd->identify.csi) { case NVME_CSI_NVM: - /* Not supported */ - break; + nvme_execute_identify_ns_nvm(req); + return; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) { nvmet_execute_identify_ns_zns(req); @@ -719,19 +1144,28 @@ static void nvmet_execute_identify(struct nvmet_req *req) break; } break; + case NVME_ID_CNS_NS_ACTIVE_LIST_CS: + nvmet_execute_identify_nslist(req, true); + return; + case NVME_ID_CNS_NS_CS_INDEP: + nvmet_execute_id_cs_indep(req); + return; + case NVME_ID_CNS_ENDGRP_LIST: + nvmet_execute_identify_endgrp_list(req); + return; } pr_debug("unhandled identify cns %d on qid %d\n", req->cmd->identify.cns, req->sq->qid); req->error_loc = offsetof(struct nvme_identify, cns); - nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_STATUS_DNR); } /* * A "minimum viable" abort implementation: the command is mandatory in the * spec, but we are not required to do any useful work. We couldn't really * do a useful abort, so don't bother even with waiting for the command - * to be exectuted and return immediately telling the command to abort + * to be executed and return immediately telling the command to abort * wasn't found. */ static void nvmet_execute_abort(struct nvmet_req *req) @@ -807,7 +1241,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) if (val32 & ~mask) { req->error_loc = offsetof(struct nvme_common_command, cdw11); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); @@ -816,6 +1250,92 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) return 0; } +static u16 nvmet_set_feat_host_id(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + + if (!nvmet_is_pci_ctrl(ctrl)) + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; + + /* + * The NVMe base specifications v2.1 recommends supporting 128-bits host + * IDs (section 5.1.25.1.28.1). However, that same section also says + * that "The controller may support a 64-bit Host Identifier and/or an + * extended 128-bit Host Identifier". So simplify this support and do + * not support 64-bits host IDs to avoid needing to check that all + * controllers associated with the same subsystem all use the same host + * ID size. 
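The new nvmet_set_feat_host_id() only accepts the extended 128-bit host identifier: the host must set bit 0 of CDW11 (the EXHID bit) and carry the 16-byte identifier in the data buffer, otherwise the command is rejected with Invalid Field, as the bit test and nvmet_copy_from_sgl() call in the next hunk lines show. A minimal user-space model of that check (hypothetical helper, error codes reduced to -1/0):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* model of the host-identifier set-features check: only the extended
 * 128-bit form (CDW11 bit 0, EXHID, set) is accepted */
static int set_host_id(uint32_t cdw11, const uint8_t *data, uint8_t hostid[16])
{
        if (!(cdw11 & 0x1))
                return -1;              /* 64-bit host id: rejected */
        memcpy(hostid, data, 16);       /* identifier travels in the data buffer */
        return 0;
}

int main(void)
{
        uint8_t payload[16] = { 0xde, 0xad, 0xbe, 0xef };
        uint8_t hostid[16] = { 0 };

        printf("exhid=0 -> %d\n", set_host_id(0x0, payload, hostid));
        printf("exhid=1 -> %d, hostid[0]=0x%02x\n",
               set_host_id(0x1, payload, hostid), hostid[0]);
        return 0;
}

The 64-bit form is deliberately left unsupported, which matches the comment's point about keeping every controller of a subsystem on a single host identifier size.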
+ */ + if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { + req->error_loc = offsetof(struct nvme_common_command, cdw11); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid, + sizeof(req->sq->ctrl->hostid)); +} + +static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + struct nvmet_feat_irq_coalesce irqc = { + .time = (cdw11 >> 8) & 0xff, + .thr = cdw11 & 0xff, + }; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. + */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc); +} + +static u16 nvmet_set_feat_irq_config(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + struct nvmet_feat_irq_config irqcfg = { + .iv = cdw11 & 0xffff, + .cd = (cdw11 >> 16) & 0x1, + }; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. + */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg); +} + +static u16 nvmet_set_feat_arbitration(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + struct nvmet_feat_arbitration arb = { + .hpw = (cdw11 >> 24) & 0xff, + .mpw = (cdw11 >> 16) & 0xff, + .lpw = (cdw11 >> 8) & 0xff, + .ab = cdw11 & 0x3, + }; + + if (!ctrl->ops->set_feature) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb); +} + void nvmet_execute_set_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = nvmet_req_subsys(req); @@ -829,16 +1349,25 @@ void nvmet_execute_set_features(struct nvmet_req *req) return; switch (cdw10 & 0xff) { + case NVME_FEAT_ARBITRATION: + status = nvmet_set_feat_arbitration(req); + break; case NVME_FEAT_NUM_QUEUES: ncqr = (cdw11 >> 16) & 0xffff; nsqr = cdw11 & 0xffff; if (ncqr == 0xffff || nsqr == 0xffff) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } nvmet_set_result(req, (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); break; + case NVME_FEAT_IRQ_COALESCE: + status = nvmet_set_feat_irq_coalesce(req); + break; + case NVME_FEAT_IRQ_CONFIG: + status = nvmet_set_feat_irq_config(req); + break; case NVME_FEAT_KATO: status = nvmet_set_feat_kato(req); break; @@ -846,14 +1375,17 @@ void nvmet_execute_set_features(struct nvmet_req *req) status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); break; case NVME_FEAT_HOST_ID: - status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + status = nvmet_set_feat_host_id(req); break; case NVME_FEAT_WRITE_PROTECT: status = nvmet_set_feat_write_protect(req); break; + case NVME_FEAT_RESV_MASK: + status = nvmet_set_feat_resv_notif_mask(req, cdw11); + break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -880,6 +1412,79 @@ static u16 
nvmet_get_feat_write_protect(struct nvmet_req *req) return 0; } +static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_feat_irq_coalesce irqc = { }; + u16 status; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. + */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc); + if (status != NVME_SC_SUCCESS) + return status; + + nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_get_feat_irq_config(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff; + struct nvmet_feat_irq_config irqcfg = { .iv = iv }; + u16 status; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. + */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg); + if (status != NVME_SC_SUCCESS) + return status; + + nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_get_feat_arbitration(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_feat_arbitration arb = { }; + u16 status; + + if (!ctrl->ops->get_feature) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb); + if (status != NVME_SC_SUCCESS) + return status; + + nvmet_set_result(req, + ((u32)arb.hpw << 24) | + ((u32)arb.mpw << 16) | + ((u32)arb.lpw << 8) | + (arb.ab & 0x3)); + + return NVME_SC_SUCCESS; +} + void nvmet_get_feat_kato(struct nvmet_req *req) { nvmet_set_result(req, req->sq->ctrl->kato * 1000); @@ -906,21 +1511,24 @@ void nvmet_execute_get_features(struct nvmet_req *req) * need to come up with some fake values for these. 
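The Get Features handlers above pack the controller-provided values back into the completion result dword with the same layout Set Features used to unpack CDW11; for arbitration that is AB in the low bits (masked with 0x3, as in the code), LPW in bits 15:8, MPW in 23:16 and HPW in 31:24. A round-trip sketch with made-up weights:

#include <stdio.h>
#include <stdint.h>

/* pack/unpack of the arbitration feature dword used above */
struct arb {
        uint8_t hpw, mpw, lpw, ab;
};

static uint32_t arb_pack(struct arb a)
{
        return ((uint32_t)a.hpw << 24) | ((uint32_t)a.mpw << 16) |
               ((uint32_t)a.lpw << 8) | (a.ab & 0x3);
}

static struct arb arb_unpack(uint32_t cdw11)
{
        return (struct arb){
                .hpw = (cdw11 >> 24) & 0xff,
                .mpw = (cdw11 >> 16) & 0xff,
                .lpw = (cdw11 >> 8) & 0xff,
                .ab  = cdw11 & 0x3,
        };
}

int main(void)
{
        struct arb a = { .hpw = 64, .mpw = 32, .lpw = 8, .ab = 3 };
        uint32_t dw = arb_pack(a);
        struct arb b = arb_unpack(dw);

        printf("packed=0x%08x hpw=%u mpw=%u lpw=%u ab=%u\n",
               dw, b.hpw, b.mpw, b.lpw, b.ab);
        return 0;
}

For hpw=64, mpw=32, lpw=8, ab=3 the packed dword is 0x40200803, and unpacking recovers the same fields.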
*/ #if 0 - case NVME_FEAT_ARBITRATION: - break; case NVME_FEAT_POWER_MGMT: break; case NVME_FEAT_TEMP_THRESH: break; case NVME_FEAT_ERR_RECOVERY: break; + case NVME_FEAT_WRITE_ATOMIC: + break; +#endif + case NVME_FEAT_ARBITRATION: + status = nvmet_get_feat_arbitration(req); + break; case NVME_FEAT_IRQ_COALESCE: + status = nvmet_get_feat_irq_coalesce(req); break; case NVME_FEAT_IRQ_CONFIG: + status = nvmet_get_feat_irq_config(req); break; - case NVME_FEAT_WRITE_ATOMIC: - break; -#endif case NVME_FEAT_ASYNC_EVENT: nvmet_get_feat_async_event(req); break; @@ -939,7 +1547,7 @@ void nvmet_execute_get_features(struct nvmet_req *req) if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { req->error_loc = offsetof(struct nvme_common_command, cdw11); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -949,10 +1557,13 @@ void nvmet_execute_get_features(struct nvmet_req *req) case NVME_FEAT_WRITE_PROTECT: status = nvmet_get_feat_write_protect(req); break; + case NVME_FEAT_RESV_MASK: + status = nvmet_get_feat_resv_notif_mask(req); + break; default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -969,7 +1580,7 @@ void nvmet_execute_async_event(struct nvmet_req *req) mutex_lock(&ctrl->lock); if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) { mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_STATUS_DNR); return; } ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; @@ -998,6 +1609,27 @@ out: nvmet_req_complete(req, status); } +u32 nvmet_admin_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + if (nvme_is_fabrics(cmd)) + return nvmet_fabrics_admin_cmd_data_len(req); + if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) + return nvmet_discovery_cmd_data_len(req); + + switch (cmd->common.opcode) { + case nvme_admin_get_log_page: + return nvmet_get_log_page_len(cmd); + case nvme_admin_identify: + return NVME_IDENTIFY_DATA_SIZE; + case nvme_admin_get_features: + return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10)); + default: + return 0; + } +} + u16 nvmet_parse_admin_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -1005,8 +1637,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (nvme_is_fabrics(cmd)) return nvmet_parse_fabrics_admin_cmd(req); - if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) return nvmet_parse_discovery_cmd(req); @@ -1014,13 +1644,30 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (unlikely(ret)) return ret; + /* For PCI controllers, admin commands shall not use SGL. 
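The comment above and the check that follows it reject any SGL use on a PCI controller's admin queue: the PSDT selector sits in bits 7:6 of the command flags byte, so a non-zero intersection with NVME_CMD_SGL_ALL means the host asked for SGLs rather than PRPs. A user-space sketch of that gate (hypothetical helper, constants copied from the flags layout):

#include <stdio.h>
#include <stdint.h>

/* PSDT lives in bits 7:6 of the command flags byte: 00b means PRPs,
 * anything else selects an SGL variant */
#define CMD_SGL_METABUF (1 << 6)
#define CMD_SGL_METASEG (1 << 7)
#define CMD_SGL_ALL     (CMD_SGL_METABUF | CMD_SGL_METASEG)

static const char *admin_xfer_check(uint8_t flags, int pci_ctrl, int qid)
{
        if (pci_ctrl && qid == 0 && (flags & CMD_SGL_ALL))
                return "rejected: Invalid Field (admin commands must use PRPs)";
        return "accepted";
}

int main(void)
{
        printf("PRP admin cmd on PCI ctrl: %s\n", admin_xfer_check(0x00, 1, 0));
        printf("SGL admin cmd on PCI ctrl: %s\n", admin_xfer_check(0x40, 1, 0));
        printf("SGL admin cmd on fabrics:  %s\n", admin_xfer_check(0x40, 0, 0));
        return 0;
}

Fabrics controllers keep accepting SGLs as before; only the PCI transport's admin queue insists on PRPs.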
*/ + if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid && + cmd->common.flags & NVME_CMD_SGL_ALL) + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + if (nvmet_is_passthru_req(req)) return nvmet_parse_passthru_admin_cmd(req); switch (cmd->common.opcode) { + case nvme_admin_delete_sq: + req->execute = nvmet_execute_delete_sq; + return 0; + case nvme_admin_create_sq: + req->execute = nvmet_execute_create_sq; + return 0; case nvme_admin_get_log_page: req->execute = nvmet_execute_get_log_page; return 0; + case nvme_admin_delete_cq: + req->execute = nvmet_execute_delete_cq; + return 0; + case nvme_admin_create_cq: + req->execute = nvmet_execute_create_cq; + return 0; case nvme_admin_identify: req->execute = nvmet_execute_identify; return 0; diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index 7d2633940f9b..2eadeb7e06f2 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -15,7 +15,8 @@ #include <linux/ctype.h> #include <linux/random.h> #include <linux/nvme-auth.h> -#include <asm/unaligned.h> +#include <linux/nvme-keyring.h> +#include <linux/unaligned.h> #include "nvmet.h" @@ -25,6 +26,18 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, unsigned char key_hash; char *dhchap_secret; + if (!strlen(secret)) { + if (set_ctrl) { + kfree(host->dhchap_ctrl_secret); + host->dhchap_ctrl_secret = NULL; + host->dhchap_ctrl_key_hash = 0; + } else { + kfree(host->dhchap_secret); + host->dhchap_secret = NULL; + host->dhchap_key_hash = 0; + } + return 0; + } if (sscanf(secret, "DHHC-1:%hhd:%*s", &key_hash) != 1) return -EINVAL; if (key_hash > 3) { @@ -103,6 +116,7 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id) pr_debug("%s: ctrl %d failed to generate private key, err %d\n", __func__, ctrl->cntlid, ret); kfree_sensitive(ctrl->dh_key); + ctrl->dh_key = NULL; return ret; } ctrl->dh_keysize = crypto_kpp_maxsize(ctrl->dh_tfm); @@ -126,7 +140,7 @@ int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id) return ret; } -u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl) +u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq) { int ret = 0; struct nvmet_host_link *p; @@ -152,6 +166,11 @@ u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl) goto out_unlock; } + if (nvmet_queue_tls_keyid(sq)) { + pr_debug("host %s tls enabled\n", ctrl->hostnqn); + goto out_unlock; + } + ret = nvmet_setup_dhgroup(ctrl, host->dhchap_dhgroup_id); if (ret < 0) { pr_warn("Failed to setup DH group"); @@ -220,6 +239,9 @@ out_unlock: void nvmet_auth_sq_free(struct nvmet_sq *sq) { cancel_delayed_work(&sq->auth_expired_work); +#ifdef CONFIG_NVME_TARGET_TCP_TLS + sq->tls_key = NULL; +#endif kfree(sq->dhchap_c1); sq->dhchap_c1 = NULL; kfree(sq->dhchap_c2); @@ -248,13 +270,22 @@ void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) nvme_auth_free_key(ctrl->ctrl_key); ctrl->ctrl_key = NULL; } +#ifdef CONFIG_NVME_TARGET_TCP_TLS + if (ctrl->tls_key) { + key_put(ctrl->tls_key); + ctrl->tls_key = NULL; + } +#endif } bool nvmet_check_auth_status(struct nvmet_req *req) { - if (req->sq->ctrl->host_key && - !req->sq->authenticated) - return false; + if (req->sq->ctrl->host_key) { + if (req->sq->qid > 0) + return true; + if (!req->sq->authenticated) + return false; + } return true; } @@ -262,7 +293,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, unsigned int shash_len) { struct crypto_shash *shash_tfm; - struct shash_desc *shash; + SHASH_DESC_ON_STACK(shash, shash_tfm); struct nvmet_ctrl *ctrl = req->sq->ctrl; const char *hash_name; u8 *challenge = 
req->sq->dhchap_c1; @@ -314,19 +345,13 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c1, challenge, shash_len); if (ret) - goto out_free_response; + goto out; } pr_debug("ctrl %d qid %d host response seq %u transaction %d\n", ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1, req->sq->dhchap_tid); - shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), - GFP_KERNEL); - if (!shash) { - ret = -ENOMEM; - goto out_free_response; - } shash->tfm = shash_tfm; ret = crypto_shash_init(shash); if (ret) @@ -342,28 +367,28 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, ret = crypto_shash_update(shash, buf, 2); if (ret) goto out; - memset(buf, 0, 4); + *buf = req->sq->sc_c; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; ret = crypto_shash_update(shash, "HostHost", 8); if (ret) goto out; + memset(buf, 0, 4); ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); if (ret) goto out; ret = crypto_shash_update(shash, buf, 1); if (ret) goto out; - ret = crypto_shash_update(shash, ctrl->subsysnqn, - strlen(ctrl->subsysnqn)); + ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn, + strlen(ctrl->subsys->subsysnqn)); if (ret) goto out; ret = crypto_shash_final(shash, response); out: if (challenge != req->sq->dhchap_c1) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: @@ -404,7 +429,7 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, } transformed_key = nvme_auth_transform_key(ctrl->ctrl_key, - ctrl->subsysnqn); + ctrl->subsys->subsysnqn); if (IS_ERR(transformed_key)) { ret = PTR_ERR(transformed_key); goto out_free_tfm; @@ -427,14 +452,14 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, req->sq->dhchap_c2, challenge, shash_len); if (ret) - goto out_free_response; + goto out_free_challenge; } shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(shash_tfm), GFP_KERNEL); if (!shash) { ret = -ENOMEM; - goto out_free_response; + goto out_free_challenge; } shash->tfm = shash_tfm; @@ -459,8 +484,8 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, ret = crypto_shash_update(shash, "Controller", 10); if (ret) goto out; - ret = crypto_shash_update(shash, ctrl->subsysnqn, - strlen(ctrl->subsysnqn)); + ret = crypto_shash_update(shash, ctrl->subsys->subsysnqn, + strlen(ctrl->subsys->subsysnqn)); if (ret) goto out; ret = crypto_shash_update(shash, buf, 1); @@ -471,9 +496,10 @@ int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, goto out; ret = crypto_shash_final(shash, response); out: + kfree(shash); +out_free_challenge: if (challenge != req->sq->dhchap_c2) kfree(challenge); - kfree(shash); out_free_response: nvme_auth_free_key(transformed_key); out_free_tfm: @@ -527,3 +553,59 @@ int nvmet_auth_ctrl_sesskey(struct nvmet_req *req, return ret; } + +void nvmet_auth_insert_psk(struct nvmet_sq *sq) +{ + int hash_len = nvme_auth_hmac_hash_len(sq->ctrl->shash_id); + u8 *psk, *digest, *tls_psk; + size_t psk_len; + int ret; +#ifdef CONFIG_NVME_TARGET_TCP_TLS + struct key *tls_key = NULL; +#endif + + ret = nvme_auth_generate_psk(sq->ctrl->shash_id, + sq->dhchap_skey, + sq->dhchap_skey_len, + sq->dhchap_c1, sq->dhchap_c2, + hash_len, &psk, &psk_len); + if (ret) { + pr_warn("%s: ctrl %d qid %d failed to generate PSK, error %d\n", + __func__, sq->ctrl->cntlid, sq->qid, ret); + return; + } + ret = nvme_auth_generate_digest(sq->ctrl->shash_id, psk, psk_len, + sq->ctrl->subsys->subsysnqn, + sq->ctrl->hostnqn, &digest); + if (ret) { + 
pr_warn("%s: ctrl %d qid %d failed to generate digest, error %d\n", + __func__, sq->ctrl->cntlid, sq->qid, ret); + goto out_free_psk; + } + ret = nvme_auth_derive_tls_psk(sq->ctrl->shash_id, psk, psk_len, + digest, &tls_psk); + if (ret) { + pr_warn("%s: ctrl %d qid %d failed to derive TLS PSK, error %d\n", + __func__, sq->ctrl->cntlid, sq->qid, ret); + goto out_free_digest; + } +#ifdef CONFIG_NVME_TARGET_TCP_TLS + tls_key = nvme_tls_psk_refresh(NULL, sq->ctrl->hostnqn, + sq->ctrl->subsys->subsysnqn, + sq->ctrl->shash_id, tls_psk, psk_len, + digest); + if (IS_ERR(tls_key)) { + pr_warn("%s: ctrl %d qid %d failed to refresh key, error %ld\n", + __func__, sq->ctrl->cntlid, sq->qid, PTR_ERR(tls_key)); + tls_key = NULL; + } + if (sq->ctrl->tls_key) + key_put(sq->ctrl->tls_key); + sq->ctrl->tls_key = tls_key; +#endif + kfree_sensitive(tls_psk); +out_free_digest: + kfree_sensitive(digest); +out_free_psk: + kfree_sensitive(psk); +} diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index 685e89b35d33..e44ef69dffc2 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -37,6 +37,7 @@ static struct nvmet_type_name_map nvmet_transport[] = { { NVMF_TRTYPE_RDMA, "rdma" }, { NVMF_TRTYPE_FC, "fc" }, { NVMF_TRTYPE_TCP, "tcp" }, + { NVMF_TRTYPE_PCI, "pci" }, { NVMF_TRTYPE_LOOP, "loop" }, }; @@ -46,6 +47,7 @@ static const struct nvmet_type_name_map nvmet_addr_family[] = { { NVMF_ADDR_FAMILY_IP6, "ipv6" }, { NVMF_ADDR_FAMILY_IB, "ib" }, { NVMF_ADDR_FAMILY_FC, "fc" }, + { NVMF_ADDR_FAMILY_PCI, "pci" }, { NVMF_ADDR_FAMILY_LOOP, "loop" }, }; @@ -769,6 +771,32 @@ static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item, CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size); +static ssize_t nvmet_ns_resv_enable_show(struct config_item *item, char *page) +{ + return sysfs_emit(page, "%d\n", to_nvmet_ns(item)->pr.enable); +} + +static ssize_t nvmet_ns_resv_enable_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_ns *ns = to_nvmet_ns(item); + bool val; + + if (kstrtobool(page, &val)) + return -EINVAL; + + mutex_lock(&ns->subsys->lock); + if (ns->enabled) { + pr_err("the ns:%d is already enabled.\n", ns->nsid); + mutex_unlock(&ns->subsys->lock); + return -EINVAL; + } + ns->pr.enable = val; + mutex_unlock(&ns->subsys->lock); + return count; +} +CONFIGFS_ATTR(nvmet_ns_, resv_enable); + static struct configfs_attribute *nvmet_ns_attrs[] = { &nvmet_ns_attr_device_path, &nvmet_ns_attr_device_nguid, @@ -777,24 +805,13 @@ static struct configfs_attribute *nvmet_ns_attrs[] = { &nvmet_ns_attr_enable, &nvmet_ns_attr_buffered_io, &nvmet_ns_attr_revalidate_size, + &nvmet_ns_attr_resv_enable, #ifdef CONFIG_PCI_P2PDMA &nvmet_ns_attr_p2pmem, #endif NULL, }; -bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid) -{ - struct config_item *ns_item; - char name[12]; - - snprintf(name, sizeof(name), "%u", nsid); - mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex); - ns_item = config_group_find_item(&subsys->namespaces_group, name); - mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex); - return ns_item != NULL; -} - static void nvmet_ns_release(struct config_item *item) { struct nvmet_ns *ns = to_nvmet_ns(item); @@ -1385,6 +1402,49 @@ out_unlock: } CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max); +static ssize_t nvmet_subsys_attr_vendor_id_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "0x%x\n", to_subsys(item)->vendor_id); +} + +static ssize_t 
nvmet_subsys_attr_vendor_id_store(struct config_item *item, + const char *page, size_t count) +{ + u16 vid; + + if (kstrtou16(page, 0, &vid)) + return -EINVAL; + + down_write(&nvmet_config_sem); + to_subsys(item)->vendor_id = vid; + up_write(&nvmet_config_sem); + return count; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_vendor_id); + +static ssize_t nvmet_subsys_attr_subsys_vendor_id_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "0x%x\n", + to_subsys(item)->subsys_vendor_id); +} + +static ssize_t nvmet_subsys_attr_subsys_vendor_id_store(struct config_item *item, + const char *page, size_t count) +{ + u16 ssvid; + + if (kstrtou16(page, 0, &ssvid)) + return -EINVAL; + + down_write(&nvmet_config_sem); + to_subsys(item)->subsys_vendor_id = ssvid; + up_write(&nvmet_config_sem); + return count; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_subsys_vendor_id); + static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, char *page) { @@ -1613,6 +1673,8 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = { &nvmet_subsys_attr_attr_serial, &nvmet_subsys_attr_attr_cntlid_min, &nvmet_subsys_attr_attr_cntlid_max, + &nvmet_subsys_attr_attr_vendor_id, + &nvmet_subsys_attr_attr_subsys_vendor_id, &nvmet_subsys_attr_attr_model, &nvmet_subsys_attr_attr_qid_max, &nvmet_subsys_attr_attr_ieee_oui, @@ -1767,6 +1829,7 @@ static struct config_group *nvmet_referral_make( return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&port->entry); + port->disc_addr.trtype = NVMF_TRTYPE_MAX; config_group_init_type_name(&port->group, name, &nvmet_referral_type); return &port->group; @@ -1992,6 +2055,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group, port->inline_data_size = -1; /* < 0 == let the transport choose */ port->max_queue_size = -1; /* < 0 == let the transport choose */ + port->disc_addr.trtype = NVMF_TRTYPE_MAX; port->disc_addr.portid = cpu_to_le16(portid); port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW; @@ -2227,12 +2291,17 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item, const char *page, size_t count) { struct list_head *entry; + char *old_nqn, *new_nqn; size_t len; len = strcspn(page, "\n"); if (!len || len > NVMF_NQN_FIELD_LEN - 1) return -EINVAL; + new_nqn = kstrndup(page, len, GFP_KERNEL); + if (!new_nqn) + return -ENOMEM; + down_write(&nvmet_config_sem); list_for_each(entry, &nvmet_subsystems_group.cg_children) { struct config_item *item = @@ -2241,13 +2310,15 @@ static ssize_t nvmet_root_discovery_nqn_store(struct config_item *item, if (!strncmp(config_item_name(item), page, len)) { pr_err("duplicate NQN %s\n", config_item_name(item)); up_write(&nvmet_config_sem); + kfree(new_nqn); return -EINVAL; } } - memset(nvmet_disc_subsys->subsysnqn, 0, NVMF_NQN_FIELD_LEN); - memcpy(nvmet_disc_subsys->subsysnqn, page, len); + old_nqn = nvmet_disc_subsys->subsysnqn; + nvmet_disc_subsys->subsysnqn = new_nqn; up_write(&nvmet_config_sem); + kfree(old_nqn); return len; } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 4ff460ba2826..cc88e5a28c8a 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -16,6 +16,7 @@ #include "trace.h" #include "nvmet.h" +#include "debugfs.h" struct kmem_cache *nvmet_bvec_cache; struct workqueue_struct *buffered_io_wq; @@ -39,7 +40,7 @@ EXPORT_SYMBOL_GPL(nvmet_wq); * - the nvmet_transports array * * When updating any of those lists/structures write lock should be obtained, - * while when reading (popolating discovery log 
page or checking host-subsystem + * while when reading (populating discovery log page or checking host-subsystem * link) read lock is obtained to allow concurrent reads. */ DECLARE_RWSEM(nvmet_config_sem); @@ -55,20 +56,13 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) return NVME_SC_SUCCESS; case -ENOSPC: req->error_loc = offsetof(struct nvme_rw_command, length); - return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + return NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; case -EREMOTEIO: req->error_loc = offsetof(struct nvme_rw_command, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; case -EOPNOTSUPP: req->error_loc = offsetof(struct nvme_common_command, opcode); - switch (req->cmd->common.opcode) { - case nvme_cmd_dsm: - case nvme_cmd_write_zeroes: - return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; - default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; - } - break; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; case -ENODATA: req->error_loc = offsetof(struct nvme_rw_command, nsid); return NVME_SC_ACCESS_DENIED; @@ -76,7 +70,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno) fallthrough; default: req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; } } @@ -86,7 +80,7 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req) req->sq->qid); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, @@ -97,7 +91,7 @@ u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf, { if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -106,7 +100,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len) { if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -115,7 +109,7 @@ u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len) { if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) { req->error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR; } return 0; } @@ -126,7 +120,7 @@ static u32 nvmet_max_nsid(struct nvmet_subsys *subsys) unsigned long idx; u32 nsid = 0; - xa_for_each(&subsys->namespaces, idx, cur) + nvmet_for_each_enabled_ns(&subsys->namespaces, idx, cur) nsid = cur->nsid; return nsid; @@ -145,7 +139,7 @@ static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl) while (ctrl->nr_async_event_cmds) { req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; mutex_unlock(&ctrl->lock); - nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR); + nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_STATUS_DNR); mutex_lock(&ctrl->lock); } mutex_unlock(&ctrl->lock); @@ -323,6 +317,9 @@ int nvmet_enable_port(struct nvmet_port *port) lockdep_assert_held(&nvmet_config_sem); + if (port->disc_addr.trtype == NVMF_TRTYPE_MAX) + return -EINVAL; + ops = nvmet_transports[port->disc_addr.trtype]; if (!ops) { 
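Throughout this series the Do Not Retry flag is spelled NVME_STATUS_DNR and OR'd into the status code, as in the errno_to_nvme_status() hunk above. A tiny standalone model of that errno-to-status mapping is sketched below; the numeric codes and the DNR bit value are placeholders for the demo, not the kernel's definitions.

    #include <errno.h>
    #include <stdio.h>

    /* Placeholder status codes and a Do-Not-Retry flag bit for the demo. */
    enum {
        DEMO_SC_SUCCESS      = 0x0,
        DEMO_SC_CAP_EXCEEDED = 0x1,
        DEMO_SC_LBA_RANGE    = 0x2,
        DEMO_SC_INTERNAL     = 0x3,
        DEMO_STATUS_DNR      = 0x4000,   /* "do not retry" bit, demo value */
    };

    /* Model of errno_to_nvme_status(): map a POSIX errno to status | DNR. */
    static unsigned int demo_errno_to_status(int err)
    {
        switch (err) {
        case 0:
            return DEMO_SC_SUCCESS;
        case -ENOSPC:
            return DEMO_SC_CAP_EXCEEDED | DEMO_STATUS_DNR;
        case -EREMOTEIO:
            return DEMO_SC_LBA_RANGE | DEMO_STATUS_DNR;
        default:
            return DEMO_SC_INTERNAL | DEMO_STATUS_DNR;
        }
    }

    int main(void)
    {
        unsigned int sts = demo_errno_to_status(-ENOSPC);

        printf("status 0x%x, dnr=%d\n", sts, !!(sts & DEMO_STATUS_DNR));
        return 0;
    }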
up_write(&nvmet_config_sem); @@ -440,11 +437,14 @@ u16 nvmet_req_find_ns(struct nvmet_req *req) struct nvmet_subsys *subsys = nvmet_req_subsys(req); req->ns = xa_load(&subsys->namespaces, nsid); - if (unlikely(!req->ns)) { + if (unlikely(!req->ns || !req->ns->enabled)) { req->error_loc = offsetof(struct nvme_common_command, nsid); - if (nvmet_subsys_nsid_exists(subsys, nsid)) - return NVME_SC_INTERNAL_PATH_ERROR; - return NVME_SC_INVALID_NS | NVME_SC_DNR; + if (!req->ns) /* ns doesn't exist! */ + return NVME_SC_INVALID_NS | NVME_STATUS_DNR; + + /* ns exists but it's disabled */ + req->ns = NULL; + return NVME_SC_INTERNAL_PATH_ERROR; } percpu_ref_get(&req->ns->ref); @@ -513,9 +513,6 @@ static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns) return 0; } -/* - * Note: ctrl->subsys->lock should be held when calling this function - */ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl, struct nvmet_ns *ns) { @@ -523,6 +520,8 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl, struct pci_dev *p2p_dev; int ret; + lockdep_assert_held(&ctrl->subsys->lock); + if (!ctrl->p2p_client || !ns->use_p2pmem) return; @@ -581,10 +580,6 @@ int nvmet_ns_enable(struct nvmet_ns *ns) if (ns->enabled) goto out_unlock; - ret = -EMFILE; - if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) - goto out_unlock; - ret = nvmet_bdev_ns_enable(ns); if (ret == -ENOTBLK) ret = nvmet_file_ns_enable(ns); @@ -598,30 +593,25 @@ int nvmet_ns_enable(struct nvmet_ns *ns) list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) nvmet_p2pmem_ns_add_p2p(ctrl, ns); - ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace, - 0, GFP_KERNEL); - if (ret) - goto out_dev_put; - - if (ns->nsid > subsys->max_nsid) - subsys->max_nsid = ns->nsid; - - ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL); - if (ret) - goto out_restore_subsys_maxnsid; + if (ns->pr.enable) { + ret = nvmet_pr_init_ns(ns); + if (ret) + goto out_dev_put; + } - subsys->nr_namespaces++; + if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL)) + goto out_pr_exit; nvmet_ns_changed(subsys, ns->nsid); ns->enabled = true; + xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED); ret = 0; out_unlock: mutex_unlock(&subsys->lock); return ret; - -out_restore_subsys_maxnsid: - subsys->max_nsid = nvmet_max_nsid(subsys); - percpu_ref_exit(&ns->ref); +out_pr_exit: + if (ns->pr.enable) + nvmet_pr_exit_ns(ns); out_dev_put: list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); @@ -640,9 +630,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns) goto out_unlock; ns->enabled = false; - xa_erase(&ns->subsys->namespaces, ns->nsid); - if (ns->nsid == subsys->max_nsid) - subsys->max_nsid = nvmet_max_nsid(subsys); + xa_clear_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED); list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); @@ -653,7 +641,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns) * Now that we removed the namespaces from the lookup list, we * can kill the per_cpu ref and wait for any remaining references * to be dropped, as well as a RCU grace period for anyone only - * using the namepace under rcu_read_lock(). Note that we can't + * using the namespace under rcu_read_lock(). Note that we can't * use call_rcu here as we need to ensure the namespaces have * been fully destroyed before unloading the module. 
*/ @@ -662,9 +650,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns) wait_for_completion(&ns->disable_done); percpu_ref_exit(&ns->ref); - mutex_lock(&subsys->lock); + if (ns->pr.enable) + nvmet_pr_exit_ns(ns); - subsys->nr_namespaces--; + mutex_lock(&subsys->lock); nvmet_ns_changed(subsys, ns->nsid); nvmet_ns_dev_disable(ns); out_unlock: @@ -673,8 +662,19 @@ out_unlock: void nvmet_ns_free(struct nvmet_ns *ns) { + struct nvmet_subsys *subsys = ns->subsys; + nvmet_ns_disable(ns); + mutex_lock(&subsys->lock); + + xa_erase(&subsys->namespaces, ns->nsid); + if (ns->nsid == subsys->max_nsid) + subsys->max_nsid = nvmet_max_nsid(subsys); + + subsys->nr_namespaces--; + mutex_unlock(&subsys->lock); + down_write(&nvmet_ana_sem); nvmet_ana_group_enabled[ns->anagrpid]--; up_write(&nvmet_ana_sem); @@ -687,15 +687,30 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) { struct nvmet_ns *ns; + mutex_lock(&subsys->lock); + + if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) + goto out_unlock; + ns = kzalloc(sizeof(*ns), GFP_KERNEL); if (!ns) - return NULL; + goto out_unlock; init_completion(&ns->disable_done); ns->nsid = nsid; ns->subsys = subsys; + if (ns->nsid > subsys->max_nsid) + subsys->max_nsid = nsid; + + if (xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL)) + goto out_exit; + + subsys->nr_namespaces++; + + mutex_unlock(&subsys->lock); + down_write(&nvmet_ana_sem); ns->anagrpid = NVMET_DEFAULT_ANA_GRPID; nvmet_ana_group_enabled[ns->anagrpid]++; @@ -706,6 +721,12 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) ns->csi = NVME_CSI_NVM; return ns; +out_exit: + subsys->max_nsid = nvmet_max_nsid(subsys); + kfree(ns); +out_unlock: + mutex_unlock(&subsys->lock); + return NULL; } static void nvmet_update_sq_head(struct nvmet_req *req) @@ -753,6 +774,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status) static void __nvmet_req_complete(struct nvmet_req *req, u16 status) { struct nvmet_ns *ns = req->ns; + struct nvmet_pr_per_ctrl_ref *pc_ref = req->pc_ref; if (!req->sq->sqhd_disabled) nvmet_update_sq_head(req); @@ -765,6 +787,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) trace_nvmet_req_complete(req); req->ops->queue_response(req); + + if (pc_ref) + nvmet_pr_put_ns_pc_ref(pc_ref); if (ns) nvmet_put_namespace(ns); } @@ -778,11 +803,43 @@ void nvmet_req_complete(struct nvmet_req *req, u16 status) } EXPORT_SYMBOL_GPL(nvmet_req_complete); +void nvmet_cq_init(struct nvmet_cq *cq) +{ + refcount_set(&cq->ref, 1); +} +EXPORT_SYMBOL_GPL(nvmet_cq_init); + +bool nvmet_cq_get(struct nvmet_cq *cq) +{ + return refcount_inc_not_zero(&cq->ref); +} +EXPORT_SYMBOL_GPL(nvmet_cq_get); + +void nvmet_cq_put(struct nvmet_cq *cq) +{ + if (refcount_dec_and_test(&cq->ref)) + nvmet_cq_destroy(cq); +} +EXPORT_SYMBOL_GPL(nvmet_cq_put); + void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size) { cq->qid = qid; cq->size = size; + + ctrl->cqs[qid] = cq; +} + +void nvmet_cq_destroy(struct nvmet_cq *cq) +{ + struct nvmet_ctrl *ctrl = cq->ctrl; + + if (ctrl) { + ctrl->cqs[cq->qid] = NULL; + nvmet_ctrl_put(cq->ctrl); + cq->ctrl = NULL; + } } void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, @@ -802,6 +859,99 @@ static void nvmet_confirm_sq(struct percpu_ref *ref) complete(&sq->confirm_done); } +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) +{ + if (!ctrl->cqs) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + + if (cqid > ctrl->subsys->max_qid) + return NVME_SC_QID_INVALID | 
NVME_STATUS_DNR; + + if ((create && ctrl->cqs[cqid]) || (!create && !ctrl->cqs[cqid])) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + return NVME_SC_SUCCESS; +} + +u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create) +{ + if (!cqid) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + return nvmet_check_cqid(ctrl, cqid, create); +} + +bool nvmet_cq_in_use(struct nvmet_cq *cq) +{ + return refcount_read(&cq->ref) > 1; +} +EXPORT_SYMBOL_GPL(nvmet_cq_in_use); + +u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, + u16 qid, u16 size) +{ + u16 status; + + status = nvmet_check_cqid(ctrl, qid, true); + if (status != NVME_SC_SUCCESS) + return status; + + if (!kref_get_unless_zero(&ctrl->ref)) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + cq->ctrl = ctrl; + + nvmet_cq_init(cq); + nvmet_cq_setup(ctrl, cq, qid, size); + + return NVME_SC_SUCCESS; +} +EXPORT_SYMBOL_GPL(nvmet_cq_create); + +u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, + bool create) +{ + if (!ctrl->sqs) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + + if (sqid > ctrl->subsys->max_qid) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + if ((create && ctrl->sqs[sqid]) || + (!create && !ctrl->sqs[sqid])) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + return NVME_SC_SUCCESS; +} + +u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, + struct nvmet_cq *cq, u16 sqid, u16 size) +{ + u16 status; + int ret; + + if (!kref_get_unless_zero(&ctrl->ref)) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + + status = nvmet_check_sqid(ctrl, sqid, true); + if (status != NVME_SC_SUCCESS) + return status; + + ret = nvmet_sq_init(sq, cq); + if (ret) { + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + goto ctrl_put; + } + + nvmet_sq_setup(ctrl, sq, sqid, size); + sq->ctrl = ctrl; + + return NVME_SC_SUCCESS; + +ctrl_put: + nvmet_ctrl_put(ctrl); + return status; +} +EXPORT_SYMBOL_GPL(nvmet_sq_create); + void nvmet_sq_destroy(struct nvmet_sq *sq) { struct nvmet_ctrl *ctrl = sq->ctrl; @@ -817,6 +967,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) wait_for_completion(&sq->free_done); percpu_ref_exit(&sq->ref); nvmet_auth_sq_free(sq); + nvmet_cq_put(sq->cq); /* * we must reference the ctrl again after waiting for inflight IO @@ -849,18 +1000,23 @@ static void nvmet_sq_free(struct percpu_ref *ref) complete(&sq->free_done); } -int nvmet_sq_init(struct nvmet_sq *sq) +int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq) { int ret; + if (!nvmet_cq_get(cq)) + return -EINVAL; + ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL); if (ret) { pr_err("percpu_ref init failed!\n"); + nvmet_cq_put(cq); return ret; } init_completion(&sq->free_done); init_completion(&sq->confirm_done); nvmet_auth_sq_init(sq); + sq->cq = cq; return 0; } @@ -895,6 +1051,33 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req) return 0; } +static u32 nvmet_io_cmd_transfer_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + u32 metadata_len = 0; + + if (nvme_is_fabrics(cmd)) + return nvmet_fabrics_io_cmd_data_len(req); + + if (!req->ns) + return 0; + + switch (req->cmd->common.opcode) { + case nvme_cmd_read: + case nvme_cmd_write: + case nvme_cmd_zone_append: + if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) + metadata_len = nvmet_rw_metadata_len(req); + return nvmet_rw_data_len(req) + metadata_len; + case nvme_cmd_dsm: + return nvmet_dsm_len(req); + case nvme_cmd_zone_mgmt_recv: + return (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; + default: + return 0; + } +} + static u16 
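The nvmet_cq_init()/nvmet_cq_get()/nvmet_cq_put() helpers above give each completion queue a reference count so a submission queue can pin its CQ: nvmet_sq_init() now takes the CQ and the reference is dropped again in nvmet_sq_destroy(). A user-space sketch of the same get/put discipline, with C11 atomics standing in for refcount_t (demo code, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct demo_cq {
        atomic_int ref;      /* stands in for refcount_t */
        bool destroyed;
    };

    static void demo_cq_init(struct demo_cq *cq)
    {
        atomic_store(&cq->ref, 1);   /* creator holds the initial reference */
        cq->destroyed = false;
    }

    /* Like refcount_inc_not_zero(): fail once the queue is being torn down. */
    static bool demo_cq_get(struct demo_cq *cq)
    {
        int old = atomic_load(&cq->ref);

        while (old != 0) {
            if (atomic_compare_exchange_weak(&cq->ref, &old, old + 1))
                return true;
        }
        return false;
    }

    /* Like refcount_dec_and_test(): the last put tears the queue down. */
    static void demo_cq_put(struct demo_cq *cq)
    {
        if (atomic_fetch_sub(&cq->ref, 1) == 1)
            cq->destroyed = true;
    }

    int main(void)
    {
        struct demo_cq cq;

        demo_cq_init(&cq);
        printf("sq pin: %s\n", demo_cq_get(&cq) ? "ok" : "gone"); /* SQ takes a ref */
        demo_cq_put(&cq);   /* SQ teardown */
        demo_cq_put(&cq);   /* creator's reference */
        printf("destroyed: %d\n", cq.destroyed);                  /* 1 */
        return 0;
    }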
nvmet_parse_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -904,7 +1087,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return nvmet_parse_fabrics_io_cmd(req); if (unlikely(!nvmet_check_auth_status(req))) - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; ret = nvmet_check_ctrl_status(req); if (unlikely(ret)) @@ -928,27 +1111,48 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) return ret; } + if (req->ns->pr.enable) { + ret = nvmet_parse_pr_cmd(req); + if (!ret) + return ret; + } + switch (req->ns->csi) { case NVME_CSI_NVM: if (req->ns->file) - return nvmet_file_parse_io_cmd(req); - return nvmet_bdev_parse_io_cmd(req); + ret = nvmet_file_parse_io_cmd(req); + else + ret = nvmet_bdev_parse_io_cmd(req); + break; case NVME_CSI_ZNS: if (IS_ENABLED(CONFIG_BLK_DEV_ZONED)) - return nvmet_bdev_zns_parse_io_cmd(req); - return NVME_SC_INVALID_IO_CMD_SET; + ret = nvmet_bdev_zns_parse_io_cmd(req); + else + ret = NVME_SC_INVALID_IO_CMD_SET; + break; default: - return NVME_SC_INVALID_IO_CMD_SET; + ret = NVME_SC_INVALID_IO_CMD_SET; } + if (ret) + return ret; + + if (req->ns->pr.enable) { + ret = nvmet_pr_check_cmd_access(req); + if (ret) + return ret; + + ret = nvmet_pr_get_ns_pc_ref(req); + } + return ret; } -bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops) +bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq, + const struct nvmet_fabrics_ops *ops) { u8 flags = req->cmd->common.flags; u16 status; - req->cq = cq; + req->cq = sq->cq; req->sq = sq; req->ops = ops; req->sg = NULL; @@ -963,23 +1167,27 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, req->ns = NULL; req->error_loc = NVMET_NO_ERROR_LOC; req->error_slba = 0; + req->pc_ref = NULL; /* no support for fused commands yet */ if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) { req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } /* * For fabrics, PSDT field shall describe metadata pointer (MPTR) that * contains an address of a single contiguous physical buffer that is - * byte aligned. + * byte aligned. For PCI controllers, this is optional so not enforced. 
*/ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { - req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; - goto fail; + if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) { + req->error_loc = + offsetof(struct nvme_common_command, flags); + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto fail; + } } if (unlikely(!req->sq->ctrl)) @@ -996,7 +1204,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, trace_nvmet_req_init(req, req->cmd); if (unlikely(!percpu_ref_tryget_live(&sq->ref))) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto fail; } @@ -1014,16 +1222,34 @@ EXPORT_SYMBOL_GPL(nvmet_req_init); void nvmet_req_uninit(struct nvmet_req *req) { percpu_ref_put(&req->sq->ref); + if (req->pc_ref) + nvmet_pr_put_ns_pc_ref(req->pc_ref); if (req->ns) nvmet_put_namespace(req->ns); } EXPORT_SYMBOL_GPL(nvmet_req_uninit); +size_t nvmet_req_transfer_len(struct nvmet_req *req) +{ + if (likely(req->sq->qid != 0)) + return nvmet_io_cmd_transfer_len(req); + if (unlikely(!req->sq->ctrl)) + return nvmet_connect_cmd_data_len(req); + return nvmet_admin_cmd_data_len(req); +} +EXPORT_SYMBOL_GPL(nvmet_req_transfer_len); + bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len) { if (unlikely(len != req->transfer_len)) { + u16 status; + req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + if (req->cmd->common.flags & NVME_CMD_SGL_ALL) + status = NVME_SC_SGL_INVALID_DATA; + else + status = NVME_SC_INVALID_FIELD; + nvmet_req_complete(req, status | NVME_STATUS_DNR); return false; } @@ -1034,8 +1260,14 @@ EXPORT_SYMBOL_GPL(nvmet_check_transfer_len); bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) { if (unlikely(data_len > req->transfer_len)) { + u16 status; + req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR); + if (req->cmd->common.flags & NVME_CMD_SGL_ALL) + status = NVME_SC_SGL_INVALID_DATA; + else + status = NVME_SC_INVALID_FIELD; + nvmet_req_complete(req, status | NVME_STATUS_DNR); return false; } @@ -1126,41 +1358,6 @@ void nvmet_req_free_sgls(struct nvmet_req *req) } EXPORT_SYMBOL_GPL(nvmet_req_free_sgls); -static inline bool nvmet_cc_en(u32 cc) -{ - return (cc >> NVME_CC_EN_SHIFT) & 0x1; -} - -static inline u8 nvmet_cc_css(u32 cc) -{ - return (cc >> NVME_CC_CSS_SHIFT) & 0x7; -} - -static inline u8 nvmet_cc_mps(u32 cc) -{ - return (cc >> NVME_CC_MPS_SHIFT) & 0xf; -} - -static inline u8 nvmet_cc_ams(u32 cc) -{ - return (cc >> NVME_CC_AMS_SHIFT) & 0x7; -} - -static inline u8 nvmet_cc_shn(u32 cc) -{ - return (cc >> NVME_CC_SHN_SHIFT) & 0x3; -} - -static inline u8 nvmet_cc_iosqes(u32 cc) -{ - return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf; -} - -static inline u8 nvmet_cc_iocqes(u32 cc) -{ - return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; -} - static inline bool nvmet_css_supported(u8 cc_css) { switch (cc_css << NVME_CC_CSS_SHIFT) { @@ -1237,6 +1434,7 @@ void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; mutex_unlock(&ctrl->lock); } +EXPORT_SYMBOL_GPL(nvmet_update_cc); static void nvmet_init_cap(struct nvmet_ctrl *ctrl) { @@ -1304,18 +1502,18 @@ u16 nvmet_check_ctrl_status(struct nvmet_req *req) if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) { pr_err("got cmd %d while CC.EN == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - 
return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) { pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n", req->cmd->common.opcode, req->sq->qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } if (unlikely(!nvmet_check_auth_status(req))) { pr_warn("qid %d not authenticated\n", req->sq->qid); - return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR; + return NVME_SC_AUTH_REQUIRED | NVME_STATUS_DNR; } return 0; } @@ -1340,32 +1538,30 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn) return false; } -/* - * Note: ctrl->subsys->lock should be held when calling this function - */ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, - struct nvmet_req *req) + struct device *p2p_client) { struct nvmet_ns *ns; unsigned long idx; - if (!req->p2p_client) + lockdep_assert_held(&ctrl->subsys->lock); + + if (!p2p_client) return; - ctrl->p2p_client = get_device(req->p2p_client); + ctrl->p2p_client = get_device(p2p_client); - xa_for_each(&ctrl->subsys->namespaces, idx, ns) + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) nvmet_p2pmem_ns_add_p2p(ctrl, ns); } -/* - * Note: ctrl->subsys->lock should be held when calling this function - */ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl) { struct radix_tree_iter iter; void __rcu **slot; + lockdep_assert_held(&ctrl->subsys->lock); + radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0) pci_dev_put(radix_tree_deref_slot(slot)); @@ -1381,44 +1577,44 @@ static void nvmet_fatal_error_handler(struct work_struct *work) ctrl->ops->delete_ctrl(ctrl); } -u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, - struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) +struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args) { struct nvmet_subsys *subsys; struct nvmet_ctrl *ctrl; + u32 kato = args->kato; + u8 dhchap_status; int ret; - u16 status; - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; - subsys = nvmet_find_get_subsys(req->port, subsysnqn); + args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; + subsys = nvmet_find_get_subsys(args->port, args->subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", - subsysnqn); - req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); - req->error_loc = offsetof(struct nvme_common_command, dptr); - goto out; + args->subsysnqn); + args->result = IPO_IATTR_CONNECT_DATA(subsysnqn); + args->error_loc = offsetof(struct nvme_common_command, dptr); + return NULL; } down_read(&nvmet_config_sem); - if (!nvmet_host_allowed(subsys, hostnqn)) { + if (!nvmet_host_allowed(subsys, args->hostnqn)) { pr_info("connect by host %s for subsystem %s not allowed\n", - hostnqn, subsysnqn); - req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); + args->hostnqn, args->subsysnqn); + args->result = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); - status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR; - req->error_loc = offsetof(struct nvme_common_command, dptr); + args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; + args->error_loc = offsetof(struct nvme_common_command, dptr); goto out_put_subsystem; } up_read(&nvmet_config_sem); - status = NVME_SC_INTERNAL; + args->status = NVME_SC_INTERNAL; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) goto out_put_subsystem; mutex_init(&ctrl->lock); - ctrl->port = req->port; - ctrl->ops = req->ops; + 
ctrl->port = args->port; + ctrl->ops = args->ops; #ifdef CONFIG_NVME_TARGET_PASSTHRU /* By default, set loop targets to clear IDS by default */ @@ -1432,8 +1628,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); - memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); - memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); + memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE); kref_init(&ctrl->ref); ctrl->subsys = subsys; @@ -1452,12 +1647,17 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, if (!ctrl->sqs) goto out_free_changed_ns_list; + ctrl->cqs = kcalloc(subsys->max_qid + 1, sizeof(struct nvmet_cq *), + GFP_KERNEL); + if (!ctrl->cqs) + goto out_free_sqs; + ret = ida_alloc_range(&cntlid_ida, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { - status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; - goto out_free_sqs; + args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; + goto out_free_cqs; } ctrl->cntlid = ret; @@ -1477,13 +1677,47 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, nvmet_start_keep_alive_timer(ctrl); mutex_lock(&subsys->lock); + ret = nvmet_ctrl_init_pr(ctrl); + if (ret) + goto init_pr_fail; list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); - nvmet_setup_p2p_ns_map(ctrl, req); + nvmet_setup_p2p_ns_map(ctrl, args->p2p_client); + nvmet_debugfs_ctrl_setup(ctrl); mutex_unlock(&subsys->lock); - *ctrlp = ctrl; - return 0; + if (args->hostid) + uuid_copy(&ctrl->hostid, args->hostid); + dhchap_status = nvmet_setup_auth(ctrl, args->sq); + if (dhchap_status) { + pr_err("Failed to setup authentication, dhchap status %u\n", + dhchap_status); + nvmet_ctrl_put(ctrl); + if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) + args->status = + NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; + else + args->status = NVME_SC_INTERNAL; + return NULL; + } + + args->status = NVME_SC_SUCCESS; + + pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s%s.\n", + nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", + ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, + ctrl->pi_support ? " T10-PI is enabled" : "", + nvmet_has_auth(ctrl, args->sq) ? " with DH-HMAC-CHAP" : "", + nvmet_queue_tls_keyid(args->sq) ? 
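nvmet_alloc_ctrl() now takes a single nvmet_alloc_ctrl_args descriptor and reports status, result and error_loc through it rather than through a long parameter list and the request. A minimal user-space sketch of that in/out argument-struct pattern; the field names and failure codes below are invented for the demo.

    #include <stdio.h>
    #include <stdlib.h>

    /* Inputs the caller fills in, plus out-fields the callee reports through. */
    struct demo_alloc_args {
        const char *subsysnqn;   /* in */
        const char *hostnqn;     /* in */
        unsigned int kato;       /* in */
        unsigned int status;     /* out: failure code, 0 on success */
        unsigned int result;     /* out: value for the completion entry */
    };

    struct demo_ctrl {
        const char *hostnqn;
        unsigned int kato;
    };

    /* Returns a controller or NULL; failure details travel via *args. */
    static struct demo_ctrl *demo_alloc_ctrl(struct demo_alloc_args *args)
    {
        struct demo_ctrl *ctrl;

        if (!args->subsysnqn || !args->hostnqn) {
            args->status = 1;    /* placeholder "invalid parameter" code */
            return NULL;
        }
        ctrl = calloc(1, sizeof(*ctrl));
        if (!ctrl) {
            args->status = 2;    /* placeholder "internal error" code */
            return NULL;
        }
        ctrl->hostnqn = args->hostnqn;
        ctrl->kato = args->kato;
        args->status = 0;
        args->result = 1;        /* e.g. the assigned controller id */
        return ctrl;
    }

    int main(void)
    {
        struct demo_alloc_args args = {
            .subsysnqn = "nqn.2014-08.org.nvmexpress.discovery",
            .hostnqn = "nqn.2014-08.org.example:host1",
            .kato = 120000,
        };
        struct demo_ctrl *ctrl = demo_alloc_ctrl(&args);

        printf("status %u result %u\n", args.status, args.result);
        free(ctrl);
        return 0;
    }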
", TLS" : ""); + + return ctrl; + +init_pr_fail: + mutex_unlock(&subsys->lock); + nvmet_stop_keep_alive_timer(ctrl); + ida_free(&cntlid_ida, ctrl->cntlid); +out_free_cqs: + kfree(ctrl->cqs); out_free_sqs: kfree(ctrl->sqs); out_free_changed_ns_list: @@ -1492,9 +1726,9 @@ out_free_ctrl: kfree(ctrl); out_put_subsystem: nvmet_subsys_put(subsys); -out: - return status; + return NULL; } +EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl); static void nvmet_ctrl_free(struct kref *ref) { @@ -1502,6 +1736,7 @@ static void nvmet_ctrl_free(struct kref *ref) struct nvmet_subsys *subsys = ctrl->subsys; mutex_lock(&subsys->lock); + nvmet_ctrl_destroy_pr(ctrl); nvmet_release_p2p_ns_map(ctrl); list_del(&ctrl->subsys_entry); mutex_unlock(&subsys->lock); @@ -1513,10 +1748,13 @@ static void nvmet_ctrl_free(struct kref *ref) nvmet_destroy_auth(ctrl); + nvmet_debugfs_ctrl_free(ctrl); + ida_free(&cntlid_ida, ctrl->cntlid); nvmet_async_events_free(ctrl); kfree(ctrl->sqs); + kfree(ctrl->cqs); kfree(ctrl->changed_ns_list); kfree(ctrl); @@ -1527,6 +1765,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) { kref_put(&ctrl->ref, nvmet_ctrl_free); } +EXPORT_SYMBOL_GPL(nvmet_ctrl_put); void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { @@ -1539,6 +1778,14 @@ void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); +ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len) +{ + if (!ctrl->ops->host_traddr) + return -EOPNOTSUPP; + return ctrl->ops->host_traddr(ctrl, traddr, traddr_len); +} + static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn) { @@ -1633,8 +1880,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, INIT_LIST_HEAD(&subsys->ctrls); INIT_LIST_HEAD(&subsys->hosts); + ret = nvmet_debugfs_subsys_setup(subsys); + if (ret) + goto free_subsysnqn; + return subsys; +free_subsysnqn: + kfree(subsys->subsysnqn); free_fr: kfree(subsys->firmware_rev); free_mn: @@ -1649,8 +1902,12 @@ static void nvmet_subsys_free(struct kref *ref) struct nvmet_subsys *subsys = container_of(ref, struct nvmet_subsys, ref); + WARN_ON_ONCE(!list_empty(&subsys->ctrls)); + WARN_ON_ONCE(!list_empty(&subsys->hosts)); WARN_ON_ONCE(!xa_empty(&subsys->namespaces)); + nvmet_debugfs_subsys_free(subsys); + xa_destroy(&subsys->namespaces); nvmet_passthru_subsys_free(subsys); @@ -1697,21 +1954,28 @@ static int __init nvmet_init(void) goto out_free_zbd_work_queue; nvmet_wq = alloc_workqueue("nvmet-wq", - WQ_MEM_RECLAIM | WQ_UNBOUND, 0); + WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 0); if (!nvmet_wq) goto out_free_buffered_work_queue; - error = nvmet_init_discovery(); + error = nvmet_init_debugfs(); if (error) goto out_free_nvmet_work_queue; + error = nvmet_init_discovery(); + if (error) + goto out_exit_debugfs; + error = nvmet_init_configfs(); if (error) goto out_exit_discovery; + return 0; out_exit_discovery: nvmet_exit_discovery(); +out_exit_debugfs: + nvmet_exit_debugfs(); out_free_nvmet_work_queue: destroy_workqueue(nvmet_wq); out_free_buffered_work_queue: @@ -1727,6 +1991,7 @@ static void __exit nvmet_exit(void) { nvmet_exit_configfs(); nvmet_exit_discovery(); + nvmet_exit_debugfs(); ida_destroy(&cntlid_ida); destroy_workqueue(nvmet_wq); destroy_workqueue(buffered_io_wq); diff --git a/drivers/nvme/target/debugfs.c b/drivers/nvme/target/debugfs.c new file mode 100644 index 000000000000..5dcbd5aa86e1 --- /dev/null +++ b/drivers/nvme/target/debugfs.c @@ -0,0 +1,229 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DebugFS interface for 
the NVMe target. + * Copyright (c) 2022-2024 Shadow + * Copyright (c) 2024 SUSE LLC + */ + +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> + +#include "nvmet.h" +#include "debugfs.h" + +static struct dentry *nvmet_debugfs; + +#define NVMET_DEBUGFS_ATTR(field) \ + static int field##_open(struct inode *inode, struct file *file) \ + { return single_open(file, field##_show, inode->i_private); } \ + \ + static const struct file_operations field##_fops = { \ + .open = field##_open, \ + .read = seq_read, \ + .release = single_release, \ + } + +#define NVMET_DEBUGFS_RW_ATTR(field) \ + static int field##_open(struct inode *inode, struct file *file) \ + { return single_open(file, field##_show, inode->i_private); } \ + \ + static const struct file_operations field##_fops = { \ + .open = field##_open, \ + .read = seq_read, \ + .write = field##_write, \ + .release = single_release, \ + } + +static int nvmet_ctrl_hostnqn_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + + seq_puts(m, ctrl->hostnqn); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_hostnqn); + +static int nvmet_ctrl_kato_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + + seq_printf(m, "%d\n", ctrl->kato); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_kato); + +static int nvmet_ctrl_port_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + + seq_printf(m, "%d\n", le16_to_cpu(ctrl->port->disc_addr.portid)); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_port); + +static const char *const csts_state_names[] = { + [NVME_CSTS_RDY] = "ready", + [NVME_CSTS_CFS] = "fatal", + [NVME_CSTS_NSSRO] = "reset", + [NVME_CSTS_SHST_OCCUR] = "shutdown", + [NVME_CSTS_SHST_CMPLT] = "completed", + [NVME_CSTS_PP] = "paused", +}; + +static int nvmet_ctrl_state_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + bool sep = false; + int i; + + for (i = 0; i < ARRAY_SIZE(csts_state_names); i++) { + int state = BIT(i); + + if (!(ctrl->csts & state)) + continue; + if (sep) + seq_puts(m, "|"); + sep = true; + if (csts_state_names[state]) + seq_puts(m, csts_state_names[state]); + else + seq_printf(m, "%d", state); + } + if (sep) + seq_printf(m, "\n"); + return 0; +} + +static ssize_t nvmet_ctrl_state_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = file->private_data; + struct nvmet_ctrl *ctrl = m->private; + char reset[16]; + + if (count >= sizeof(reset)) + return -EINVAL; + if (copy_from_user(reset, buf, count)) + return -EFAULT; + if (!memcmp(reset, "fatal", 5)) + nvmet_ctrl_fatal_error(ctrl); + else + return -EINVAL; + return count; +} +NVMET_DEBUGFS_RW_ATTR(nvmet_ctrl_state); + +static int nvmet_ctrl_host_traddr_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + ssize_t size; + char buf[NVMF_TRADDR_SIZE + 1]; + + size = nvmet_ctrl_host_traddr(ctrl, buf, NVMF_TRADDR_SIZE); + if (size < 0) { + buf[0] = '\0'; + size = 0; + } + buf[size] = '\0'; + seq_printf(m, "%s\n", buf); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_host_traddr); + +#ifdef CONFIG_NVME_TARGET_TCP_TLS +static int nvmet_ctrl_tls_key_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = m->private; + key_serial_t keyid = nvmet_queue_tls_keyid(ctrl->sqs[0]); + + seq_printf(m, "%08x\n", keyid); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_tls_key); + +static int nvmet_ctrl_tls_concat_show(struct seq_file *m, void *p) +{ + struct nvmet_ctrl *ctrl = 
m->private; + + seq_printf(m, "%d\n", ctrl->concat); + return 0; +} +NVMET_DEBUGFS_ATTR(nvmet_ctrl_tls_concat); +#endif + +int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl) +{ + char name[32]; + struct dentry *parent = ctrl->subsys->debugfs_dir; + int ret; + + if (!parent) + return -ENODEV; + snprintf(name, sizeof(name), "ctrl%d", ctrl->cntlid); + ctrl->debugfs_dir = debugfs_create_dir(name, parent); + if (IS_ERR(ctrl->debugfs_dir)) { + ret = PTR_ERR(ctrl->debugfs_dir); + ctrl->debugfs_dir = NULL; + return ret; + } + debugfs_create_file("port", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_port_fops); + debugfs_create_file("hostnqn", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_hostnqn_fops); + debugfs_create_file("kato", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_kato_fops); + debugfs_create_file("state", S_IRUSR | S_IWUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_state_fops); + debugfs_create_file("host_traddr", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_host_traddr_fops); +#ifdef CONFIG_NVME_TARGET_TCP_TLS + debugfs_create_file("tls_concat", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_tls_concat_fops); + debugfs_create_file("tls_key", S_IRUSR, ctrl->debugfs_dir, ctrl, + &nvmet_ctrl_tls_key_fops); +#endif + return 0; +} + +void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl) +{ + debugfs_remove_recursive(ctrl->debugfs_dir); +} + +int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys) +{ + int ret = 0; + + subsys->debugfs_dir = debugfs_create_dir(subsys->subsysnqn, + nvmet_debugfs); + if (IS_ERR(subsys->debugfs_dir)) { + ret = PTR_ERR(subsys->debugfs_dir); + subsys->debugfs_dir = NULL; + } + return ret; +} + +void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys) +{ + debugfs_remove_recursive(subsys->debugfs_dir); +} + +int __init nvmet_init_debugfs(void) +{ + struct dentry *parent; + + parent = debugfs_create_dir("nvmet", NULL); + if (IS_ERR(parent)) { + pr_warn("%s: failed to create debugfs directory\n", "nvmet"); + return PTR_ERR(parent); + } + nvmet_debugfs = parent; + return 0; +} + +void nvmet_exit_debugfs(void) +{ + debugfs_remove_recursive(nvmet_debugfs); +} diff --git a/drivers/nvme/target/debugfs.h b/drivers/nvme/target/debugfs.h new file mode 100644 index 000000000000..cfb8bbf6a297 --- /dev/null +++ b/drivers/nvme/target/debugfs.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DebugFS interface for the NVMe target. 
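NVMET_DEBUGFS_ATTR above stamps out the single_open()/seq_file boilerplate once per attribute, so each new debugfs file only needs its _show function. The same stamp-out-the-boilerplate idea in standalone form, with printf standing in for seq_file output (purely illustrative, names invented for the demo):

    #include <stdio.h>

    struct demo_ctrl {
        int kato;
        const char *hostnqn;
    };

    /* Per-attribute "show" callbacks: one short function each. */
    static void demo_kato_show(const struct demo_ctrl *c)    { printf("%d\n", c->kato); }
    static void demo_hostnqn_show(const struct demo_ctrl *c) { printf("%s\n", c->hostnqn); }

    /* The macro generates the repetitive wrapper, much as NVMET_DEBUGFS_ATTR
     * generates the open routine and file_operations in the hunk above. */
    #define DEMO_ATTR(name)                                     \
        static void name##_dump(const struct demo_ctrl *c)     \
        {                                                       \
            printf("%s: ", #name);                              \
            name##_show(c);                                     \
        }

    DEMO_ATTR(demo_kato)
    DEMO_ATTR(demo_hostnqn)

    int main(void)
    {
        struct demo_ctrl c = {
            .kato = 15,
            .hostnqn = "nqn.2014-08.org.example:host1",
        };

        demo_kato_dump(&c);
        demo_hostnqn_dump(&c);
        return 0;
    }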
+ * Copyright (c) 2022-2024 Shadow + * Copyright (c) 2024 SUSE LLC + */ +#ifndef NVMET_DEBUGFS_H +#define NVMET_DEBUGFS_H + +#include <linux/types.h> + +#ifdef CONFIG_NVME_TARGET_DEBUGFS +int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys); +void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys); +int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl); +void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl); + +int __init nvmet_init_debugfs(void); +void nvmet_exit_debugfs(void); +#else +static inline int nvmet_debugfs_subsys_setup(struct nvmet_subsys *subsys) +{ + return 0; +} +static inline void nvmet_debugfs_subsys_free(struct nvmet_subsys *subsys){} + +static inline int nvmet_debugfs_ctrl_setup(struct nvmet_ctrl *ctrl) +{ + return 0; +} +static inline void nvmet_debugfs_ctrl_free(struct nvmet_ctrl *ctrl) {} + +static inline int __init nvmet_init_debugfs(void) +{ + return 0; +} + +static inline void nvmet_exit_debugfs(void) {} + +#endif + +#endif /* NVMET_DEBUGFS_H */ diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index ce54da8c6b36..c06f3e04296c 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -119,7 +119,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr, memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE); memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE); memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE); - strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); + strscpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE); } /* @@ -179,7 +179,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (req->cmd->get_log_page.lid != NVME_LOG_DISC) { req->error_loc = offsetof(struct nvme_get_log_page_command, lid); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -187,7 +187,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) if (offset & 0x3) { req->error_loc = offsetof(struct nvme_get_log_page_command, lpo); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -224,6 +224,9 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) } list_for_each_entry(r, &req->port->referrals, entry) { + if (r->disc_addr.trtype == NVMF_TRTYPE_PCI) + continue; + nvmet_format_discovery_entry(hdr, r, NVME_DISC_SUBSYS_NAME, r->disc_addr.traddr, @@ -256,7 +259,7 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req) if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) { req->error_loc = offsetof(struct nvme_identify, cns); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -320,7 +323,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } @@ -345,13 +348,27 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_common_command, cdw10); - stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + stat = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } nvmet_req_complete(req, stat); } +u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->common.opcode) { + case nvme_admin_get_log_page: + return nvmet_get_log_page_len(req->cmd); + case nvme_admin_identify: + 
return NVME_IDENTIFY_DATA_SIZE; + default: + return 0; + } +} + u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -361,7 +378,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } switch (cmd->common.opcode) { @@ -386,7 +403,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) default: pr_debug("unhandled cmd %d\n", cmd->common.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index cb34d644ed08..5946681cb0e3 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -43,8 +43,27 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d) data->auth_protocol[0].dhchap.halen, data->auth_protocol[0].dhchap.dhlen); req->sq->dhchap_tid = le16_to_cpu(data->t_id); - if (data->sc_c) - return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; + req->sq->sc_c = data->sc_c; + if (data->sc_c != NVME_AUTH_SECP_NOSC) { + if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) + return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; + /* Secure concatenation can only be enabled on the admin queue */ + if (req->sq->qid) + return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; + switch (data->sc_c) { + case NVME_AUTH_SECP_NEWTLSPSK: + if (nvmet_queue_tls_keyid(req->sq)) + return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; + break; + case NVME_AUTH_SECP_REPLACETLSPSK: + if (!nvmet_queue_tls_keyid(req->sq)) + return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; + break; + default: + return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; + } + ctrl->concat = true; + } if (data->napd != 1) return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; @@ -103,6 +122,12 @@ static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d) nvme_auth_dhgroup_name(fallback_dhgid)); ctrl->dh_gid = fallback_dhgid; } + if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) { + pr_debug("%s: ctrl %d qid %d: NULL DH group invalid " + "for secure channel concatenation\n", __func__, + ctrl->cntlid, req->sq->qid); + return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH; + } pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n", __func__, ctrl->cntlid, req->sq->qid, nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid); @@ -148,12 +173,22 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d) if (memcmp(data->rval, response, data->hl)) { pr_info("ctrl %d qid %d host response mismatch\n", ctrl->cntlid, req->sq->qid); + pr_debug("ctrl %d qid %d rval %*ph\n", + ctrl->cntlid, req->sq->qid, data->hl, data->rval); + pr_debug("ctrl %d qid %d response %*ph\n", + ctrl->cntlid, req->sq->qid, data->hl, response); kfree(response); return NVME_AUTH_DHCHAP_FAILURE_FAILED; } kfree(response); pr_debug("%s: ctrl %d qid %d host authenticated\n", __func__, ctrl->cntlid, req->sq->qid); + if (!data->cvalid && ctrl->concat) { + pr_debug("%s: ctrl %d qid %d invalid challenge\n", + __func__, ctrl->cntlid, req->sq->qid); + return NVME_AUTH_DHCHAP_FAILURE_FAILED; + } + req->sq->dhchap_s2 = le32_to_cpu(data->seqnum); if (data->cvalid) { req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl, GFP_KERNEL); @@ -163,11 +198,23 @@ static u8 nvmet_auth_reply(struct nvmet_req *req, void *d) pr_debug("%s: ctrl 
%d qid %d challenge %*ph\n", __func__, ctrl->cntlid, req->sq->qid, data->hl, req->sq->dhchap_c2); - } else { + } + /* + * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message + * Sequence Number (SEQNUM): [ .. ] + * The value 0h is used to indicate that bidirectional authentication + * is not performed, but a challenge value C2 is carried in order to + * generate a pre-shared key (PSK) for subsequent establishment of a + * secure channel. + */ + if (req->sq->dhchap_s2 == 0) { + if (ctrl->concat) + nvmet_auth_insert_psk(req->sq); req->sq->authenticated = true; + kfree(req->sq->dhchap_c2); req->sq->dhchap_c2 = NULL; - } - req->sq->dhchap_s2 = le32_to_cpu(data->seqnum); + } else if (!data->cvalid) + req->sq->authenticated = true; return 0; } @@ -179,6 +226,11 @@ static u8 nvmet_auth_failure2(void *d) return data->rescode_exp; } +u32 nvmet_auth_send_data_len(struct nvmet_req *req) +{ + return le32_to_cpu(req->cmd->auth_send.tl); +} + void nvmet_execute_auth_send(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -189,26 +241,26 @@ void nvmet_execute_auth_send(struct nvmet_req *req) u8 dhchap_status; if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, secp); goto done; } if (req->cmd->auth_send.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp0); goto done; } if (req->cmd->auth_send.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, spsp1); goto done; } - tl = le32_to_cpu(req->cmd->auth_send.tl); + tl = nvmet_auth_send_data_len(req); if (!tl) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_send_command, tl); goto done; @@ -241,7 +293,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req) pr_debug("%s: ctrl %d qid %d reset negotiation\n", __func__, ctrl->cntlid, req->sq->qid); if (!req->sq->qid) { - dhchap_status = nvmet_setup_auth(ctrl); + dhchap_status = nvmet_setup_auth(ctrl, req->sq); if (dhchap_status) { pr_err("ctrl %d qid 0 failed to setup re-authentication\n", ctrl->cntlid); @@ -298,6 +350,8 @@ void nvmet_execute_auth_send(struct nvmet_req *req) } goto done_kfree; case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2: + if (ctrl->concat) + nvmet_auth_insert_psk(req->sq); req->sq->authenticated = true; pr_debug("%s: ctrl %d qid %d ctrl authenticated\n", __func__, ctrl->cntlid, req->sq->qid); @@ -429,6 +483,11 @@ static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al) data->rescode_exp = req->sq->dhchap_status; } +u32 nvmet_auth_receive_data_len(struct nvmet_req *req) +{ + return le32_to_cpu(req->cmd->auth_receive.al); +} + void nvmet_execute_auth_receive(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -437,26 +496,26 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) u16 status = 0; if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, secp); goto done; } if (req->cmd->auth_receive.spsp0 != 0x01) { - status = NVME_SC_INVALID_FIELD | 
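The negotiate hunk above only accepts a non-zero SC_C value on the admin queue and only when TLS support is built in, and it pairs NEWTLSPSK with queues that are not yet running over a TLS PSK and REPLACETLSPSK with queues that already are; anything else is a concatenation mismatch. A small table-style model of that decision (enum values and names are stand-ins for the demo):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in SC_C values for the demo. */
    enum demo_sc_c { DEMO_NOSC, DEMO_NEWTLSPSK, DEMO_REPLACETLSPSK };

    /*
     * Model of the checks in nvmet_auth_negotiate(): true when the requested
     * secure-channel concatenation mode is acceptable for this queue.
     */
    static bool demo_sc_c_ok(enum demo_sc_c sc_c, int qid, bool queue_has_tls_psk)
    {
        if (sc_c == DEMO_NOSC)
            return true;                /* no concatenation requested */
        if (qid)
            return false;               /* only allowed on the admin queue */
        switch (sc_c) {
        case DEMO_NEWTLSPSK:
            return !queue_has_tls_psk;  /* must not already run over a PSK */
        case DEMO_REPLACETLSPSK:
            return queue_has_tls_psk;   /* must already run over a PSK */
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d\n", demo_sc_c_ok(DEMO_NEWTLSPSK, 0, false));     /* 1 */
        printf("%d\n", demo_sc_c_ok(DEMO_NEWTLSPSK, 1, false));     /* 0: I/O queue */
        printf("%d\n", demo_sc_c_ok(DEMO_REPLACETLSPSK, 0, false)); /* 0: no PSK yet */
        return 0;
    }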
NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp0); goto done; } if (req->cmd->auth_receive.spsp1 != 0x01) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, spsp1); goto done; } - al = le32_to_cpu(req->cmd->auth_receive.al); + al = nvmet_auth_receive_data_len(req); if (!al) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvmf_auth_receive_command, al); goto done; diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index 69d77d34bec1..7b8d8b397802 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -18,7 +18,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) if (req->cmd->prop_set.attrib & 1) { req->error_loc = offsetof(struct nvmf_property_set_command, attrib); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -29,7 +29,7 @@ static void nvmet_execute_prop_set(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvmf_property_set_command, offset); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } out: nvmet_req_complete(req, status); @@ -50,7 +50,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) val = ctrl->cap; break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } else { @@ -64,8 +64,11 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) case NVME_REG_CSTS: val = ctrl->csts; break; + case NVME_REG_CRTO: + val = NVME_CAP_TIMEOUT(ctrl->csts); + break; default: - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; break; } } @@ -82,6 +85,22 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) nvmet_req_complete(req, status); } +u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->fabrics.fctype) { +#ifdef CONFIG_NVME_TARGET_AUTH + case nvme_fabrics_type_auth_send: + return nvmet_auth_send_data_len(req); + case nvme_fabrics_type_auth_receive: + return nvmet_auth_receive_data_len(req); +#endif + default: + return 0; + } +} + u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -105,12 +124,28 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; } +u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->fabrics.fctype) { +#ifdef CONFIG_NVME_TARGET_AUTH + case nvme_fabrics_type_auth_send: + return nvmet_auth_send_data_len(req); + case nvme_fabrics_type_auth_receive: + return nvmet_auth_receive_data_len(req); +#endif + default: + return 0; + } +} + u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -128,7 +163,7 @@ u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req) pr_debug("received unknown capsule type 0x%x\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return 
NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return 0; @@ -147,14 +182,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) pr_warn("queue size zero!\n"); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); - ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto err; } if (ctrl->sqs[qid] != NULL) { pr_warn("qid %u has already been created\n", qid); req->error_loc = offsetof(struct nvmf_connect_command, qid); - return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; } /* for fabrics, this value applies to only the I/O Submission Queues */ @@ -163,14 +198,22 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req) sqsize, mqes, ctrl->cntlid); req->error_loc = offsetof(struct nvmf_connect_command, sqsize); req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize); - return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + return NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; } old = cmpxchg(&req->sq->ctrl, NULL, ctrl); if (old) { pr_warn("queue already connected!\n"); req->error_loc = offsetof(struct nvmf_connect_command, opcode); - return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; + return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; + } + + kref_get(&ctrl->ref); + old = cmpxchg(&req->cq->ctrl, NULL, ctrl); + if (old) { + pr_warn("queue already connected!\n"); + req->error_loc = offsetof(struct nvmf_connect_command, opcode); + return NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; } /* note: convert queue size from 0's-based value to 1's-based value */ @@ -199,10 +242,26 @@ err: return ret; } -static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl) +static u32 nvmet_connect_result(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq) { + bool needs_auth = nvmet_has_auth(ctrl, sq); + key_serial_t keyid = nvmet_queue_tls_keyid(sq); + + /* Do not authenticate I/O queues */ + if (sq->qid) + needs_auth = false; + + if (keyid) + pr_debug("%s: ctrl %d qid %d should %sauthenticate, tls psk %08x\n", + __func__, ctrl->cntlid, sq->qid, + needs_auth ? "" : "not ", keyid); + else + pr_debug("%s: ctrl %d qid %d should %sauthenticate%s\n", + __func__, ctrl->cntlid, sq->qid, + needs_auth ? "" : "not ", + ctrl->concat ? ", secure concatenation" : ""); return (u32)ctrl->cntlid | - (nvmet_has_auth(ctrl) ? NVME_CONNECT_AUTHREQ_ATR : 0); + (needs_auth ? 
NVME_CONNECT_AUTHREQ_ATR : 0); } static void nvmet_execute_admin_connect(struct nvmet_req *req) @@ -210,75 +269,68 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) struct nvmf_connect_command *c = &req->cmd->connect; struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl = NULL; - u16 status; - u8 dhchap_status; + struct nvmet_alloc_ctrl_args args = { + .port = req->port, + .sq = req->sq, + .ops = req->ops, + .p2p_client = req->p2p_client, + .kato = le32_to_cpu(c->kato), + }; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; d = kmalloc(sizeof(*d), GFP_KERNEL); if (!d) { - status = NVME_SC_INTERNAL; + args.status = NVME_SC_INTERNAL; goto complete; } - status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); - if (status) + args.status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if (args.status) goto out; if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); - req->error_loc = offsetof(struct nvmf_connect_command, recfmt); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + args.error_loc = offsetof(struct nvmf_connect_command, recfmt); + args.status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } if (unlikely(d->cntlid != cpu_to_le16(0xffff))) { pr_warn("connect attempt for invalid controller ID %#x\n", d->cntlid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; - req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); + args.status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; + args.result = IPO_IATTR_CONNECT_DATA(cntlid); goto out; } d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; - status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, - le32_to_cpu(c->kato), &ctrl); - if (status) - goto out; - uuid_copy(&ctrl->hostid, &d->hostid); + args.subsysnqn = d->subsysnqn; + args.hostnqn = d->hostnqn; + args.hostid = &d->hostid; + args.kato = le32_to_cpu(c->kato); - dhchap_status = nvmet_setup_auth(ctrl); - if (dhchap_status) { - pr_err("Failed to setup authentication, dhchap status %u\n", - dhchap_status); - nvmet_ctrl_put(ctrl); - if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) - status = (NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR); - else - status = NVME_SC_INTERNAL; + ctrl = nvmet_alloc_ctrl(&args); + if (!ctrl) goto out; - } - status = nvmet_install_queue(ctrl, req); - if (status) { + args.status = nvmet_install_queue(ctrl, req); + if (args.status) { nvmet_ctrl_put(ctrl); goto out; } - pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n", - nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", - ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, - ctrl->pi_support ? " T10-PI is enabled" : "", - nvmet_has_auth(ctrl) ? 
" with DH-HMAC-CHAP" : ""); - req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl)); + args.result = cpu_to_le32(nvmet_connect_result(ctrl, req->sq)); out: kfree(d); complete: - nvmet_req_complete(req, status); + req->error_loc = args.error_loc; + req->cqe->result.u32 = args.result; + nvmet_req_complete(req, args.status); } static void nvmet_execute_io_connect(struct nvmet_req *req) @@ -305,7 +357,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); - status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR; + status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } @@ -314,13 +366,13 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn, le16_to_cpu(d->cntlid), req); if (!ctrl) { - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; goto out; } if (unlikely(qid > ctrl->subsys->max_qid)) { pr_warn("invalid queue id (%d)\n", qid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR; + status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid); goto out_ctrl_put; } @@ -330,7 +382,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req) goto out_ctrl_put; pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid); - req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl)); + req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl, req->sq)); out: kfree(d); complete: @@ -342,6 +394,17 @@ out_ctrl_put: goto out; } +u32 nvmet_connect_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + if (!nvme_is_fabrics(cmd) || + cmd->fabrics.fctype != nvme_fabrics_type_connect) + return 0; + + return sizeof(struct nvmf_connect_data); +} + u16 nvmet_parse_connect_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -350,13 +413,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req) pr_debug("invalid command 0x%x on unconnected queue.\n", cmd->fabrics.opcode); req->error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->fabrics.fctype != nvme_fabrics_type_connect) { pr_debug("invalid capsule type 0x%x on unconnected queue.\n", cmd->fabrics.fctype); req->error_loc = offsetof(struct nvmf_common_command, fctype); - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } if (cmd->connect.qid == 0) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 381b4394731f..0d9784004c9b 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -54,6 +54,8 @@ struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */ int ls_error; struct list_head lsreq_list; /* tgtport->ls_req_list */ bool req_queued; + + struct work_struct put_work; }; @@ -111,8 +113,6 @@ struct nvmet_fc_tgtport { struct nvmet_fc_port_entry *pe; struct kref ref; u32 max_sg_cnt; - - struct work_struct put_work; }; struct nvmet_fc_port_entry { @@ -172,20 +172,6 @@ struct nvmet_fc_tgt_assoc { struct work_struct del_work; }; - -static inline int -nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) -{ - return (iodptr - iodptr->tgtport->iod); -} - -static inline int -nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) -{ - return (fodptr - fodptr->queue->fod); -} - - /* * Association and Connection IDs: * @@ -249,12 +235,13 @@ static int 
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc); static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue); static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue); static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport); -static void nvmet_fc_put_tgtport_work(struct work_struct *work) +static void nvmet_fc_put_lsop_work(struct work_struct *work) { - struct nvmet_fc_tgtport *tgtport = - container_of(work, struct nvmet_fc_tgtport, put_work); + struct nvmet_fc_ls_req_op *lsop = + container_of(work, struct nvmet_fc_ls_req_op, put_work); - nvmet_fc_tgtport_put(tgtport); + nvmet_fc_tgtport_put(lsop->tgtport); + kfree(lsop); } static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport); static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, @@ -381,7 +368,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop) DMA_BIDIRECTIONAL); out_putwork: - queue_work(nvmet_wq, &tgtport->put_work); + queue_work(nvmet_wq, &lsop->put_work); } static int @@ -402,6 +389,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport, lsreq->done = done; lsop->req_queued = false; INIT_LIST_HEAD(&lsop->lsreq_list); + INIT_WORK(&lsop->put_work, nvmet_fc_put_lsop_work); lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, lsreq->rqstlen + lsreq->rsplen, @@ -461,8 +449,6 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) __nvmet_fc_finish_ls_req(lsop); /* fc-nvme target doesn't care about success or failure of cmd */ - - kfree(lsop); } /* @@ -473,7 +459,7 @@ nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status) * down, and the related FC-NVME Association ID and Connection IDs * become invalid. * - * The behavior of the fc-nvme target is such that it's + * The behavior of the fc-nvme target is such that its * understanding of the association and connections will implicitly * be torn down. 
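The put_work relocation above (from the tgtport into each nvmet_fc_ls_req_op) is easier to see in isolation. A minimal sketch of the same per-request deferred-put pattern, using invented demo_* types rather than the real nvmet-fc structures and the generic system_wq instead of nvmet_wq:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative stand-ins only; not the nvmet-fc structures. */
struct demo_port {
        struct kref ref;
};

struct demo_lsop {
        struct demo_port *port;
        struct work_struct put_work;    /* one deferred put per request */
};

static void demo_port_release(struct kref *ref)
{
        kfree(container_of(ref, struct demo_port, ref));
}

static void demo_put_lsop_work(struct work_struct *work)
{
        struct demo_lsop *lsop = container_of(work, struct demo_lsop, put_work);

        /* Drop the port reference and free the request outside the LS
         * completion path, mirroring nvmet_fc_put_lsop_work() above. */
        kref_put(&lsop->port->ref, demo_port_release);
        kfree(lsop);
}

static void demo_finish_lsop(struct demo_lsop *lsop)
{
        /* Assumes INIT_WORK(&lsop->put_work, demo_put_lsop_work) was done
         * when the request was set up, as the driver does in its send path. */
        queue_work(system_wq, &lsop->put_work);
}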
The action is implicit as it may be due to a loss of * connectivity with the fc-nvme host, so the target may never get a @@ -504,8 +490,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) sizeof(*discon_rqst) + sizeof(*discon_acc) + tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); if (!lsop) { - dev_info(tgtport->dev, - "{%d:%d} send Disconnect Association failed: ENOMEM\n", + pr_info("{%d:%d}: send Disconnect Association failed: ENOMEM\n", tgtport->fc_target_port.port_num, assoc->a_id); return; } @@ -527,8 +512,7 @@ nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc) ret = nvmet_fc_send_ls_req_async(tgtport, lsop, nvmet_fc_disconnect_assoc_done); if (ret) { - dev_info(tgtport->dev, - "{%d:%d} XMT Disconnect Association failed: %d\n", + pr_info("{%d:%d}: XMT Disconnect Association failed: %d\n", tgtport->fc_target_port.port_num, assoc->a_id, ret); kfree(lsop); } @@ -830,7 +814,8 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) goto out_fail_iodlist; @@ -840,6 +825,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc, return queue; out_fail_iodlist: + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); destroy_workqueue(queue->work_q); out_free_queue: @@ -948,6 +934,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue) flush_workqueue(queue->work_q); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); nvmet_fc_tgt_q_put(queue); } @@ -1009,16 +996,6 @@ nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport) return kref_get_unless_zero(&hostport->ref); } -static void -nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport) -{ - /* if LLDD not implemented, leave as NULL */ - if (!hostport || !hostport->hosthandle) - return; - - nvmet_fc_hostport_put(hostport); -} - static struct nvmet_fc_hostport * nvmet_fc_match_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) { @@ -1042,33 +1019,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) struct nvmet_fc_hostport *newhost, *match = NULL; unsigned long flags; + /* + * Caller holds a reference on tgtport. 
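The reworked nvmet_fc_alloc_hostport() follows a standard "search, allocate unlocked, re-check before insert" shape now that the caller is known to hold the tgtport reference. A self-contained sketch of that shape with made-up demo_* names (not nvmet-fc symbols); the real code additionally takes a tgtport reference when a new hostport is installed:

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_host {
        void *handle;
        struct list_head node;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_hosts);

/* Caller must hold demo_lock. */
static struct demo_host *demo_find(void *handle)
{
        struct demo_host *h;

        list_for_each_entry(h, &demo_hosts, node)
                if (h->handle == handle)
                        return h;
        return NULL;
}

static struct demo_host *demo_get_or_add(void *handle)
{
        struct demo_host *h, *newhost;
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        h = demo_find(handle);
        spin_unlock_irqrestore(&demo_lock, flags);
        if (h)
                return h;

        newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
        if (!newhost)
                return ERR_PTR(-ENOMEM);
        newhost->handle = handle;

        spin_lock_irqsave(&demo_lock, flags);
        h = demo_find(handle);
        if (h) {
                kfree(newhost);         /* lost the race; reuse the match */
        } else {
                list_add_tail(&newhost->node, &demo_hosts);
                h = newhost;
        }
        spin_unlock_irqrestore(&demo_lock, flags);

        return h;
}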
+ */ + /* if LLDD not implemented, leave as NULL */ if (!hosthandle) return NULL; - /* - * take reference for what will be the newly allocated hostport if - * we end up using a new allocation - */ - if (!nvmet_fc_tgtport_get(tgtport)) - return ERR_PTR(-EINVAL); - spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); spin_unlock_irqrestore(&tgtport->lock, flags); - if (match) { - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); + if (match) return match; - } newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); - if (!newhost) { - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); + if (!newhost) return ERR_PTR(-ENOMEM); - } spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); @@ -1077,6 +1045,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) kfree(newhost); newhost = match; } else { + nvmet_fc_tgtport_get(tgtport); newhost->tgtport = tgtport; newhost->hosthandle = hosthandle; INIT_LIST_HEAD(&newhost->host_list); @@ -1090,28 +1059,31 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) } static void -nvmet_fc_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) -{ - nvmet_fc_delete_target_assoc(assoc); - nvmet_fc_tgt_a_put(assoc); -} - -static void nvmet_fc_delete_assoc_work(struct work_struct *work) { struct nvmet_fc_tgt_assoc *assoc = container_of(work, struct nvmet_fc_tgt_assoc, del_work); struct nvmet_fc_tgtport *tgtport = assoc->tgtport; - nvmet_fc_delete_assoc(assoc); + nvmet_fc_delete_target_assoc(assoc); + nvmet_fc_tgt_a_put(assoc); nvmet_fc_tgtport_put(tgtport); } static void nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) { + int terminating; + + terminating = atomic_xchg(&assoc->terminating, 1); + + /* if already terminating, do nothing */ + if (terminating) + return; + nvmet_fc_tgtport_get(assoc->tgtport); - queue_work(nvmet_wq, &assoc->del_work); + if (!queue_work(nvmet_wq, &assoc->del_work)) + nvmet_fc_tgtport_put(assoc->tgtport); } static bool @@ -1157,6 +1129,7 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle) goto out_ida; assoc->tgtport = tgtport; + nvmet_fc_tgtport_get(tgtport); assoc->a_id = idx; INIT_LIST_HEAD(&assoc->a_list); kref_init(&assoc->ref); @@ -1204,7 +1177,7 @@ nvmet_fc_target_assoc_free(struct kref *ref) /* Send Disconnect now that all i/o has completed */ nvmet_fc_xmt_disconnect_assoc(assoc); - nvmet_fc_free_hostport(assoc->hostport); + nvmet_fc_hostport_put(assoc->hostport); spin_lock_irqsave(&tgtport->lock, flags); oldls = assoc->rcv_disconn; spin_unlock_irqrestore(&tgtport->lock, flags); @@ -1212,8 +1185,7 @@ nvmet_fc_target_assoc_free(struct kref *ref) if (oldls) nvmet_fc_xmt_ls_rsp(tgtport, oldls); ida_free(&tgtport->assoc_cnt, assoc->a_id); - dev_info(tgtport->dev, - "{%d:%d} Association freed\n", + pr_info("{%d:%d}: Association freed\n", tgtport->fc_target_port.port_num, assoc->a_id); kfree(assoc); } @@ -1235,13 +1207,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) { struct nvmet_fc_tgtport *tgtport = assoc->tgtport; unsigned long flags; - int i, terminating; - - terminating = atomic_xchg(&assoc->terminating, 1); - - /* if already terminating, do nothing */ - if (terminating) - return; + int i; spin_lock_irqsave(&tgtport->lock, flags); list_del_rcu(&assoc->a_list); @@ -1255,9 +1221,10 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc) flush_workqueue(assoc->queues[i]->work_q); } - 
dev_info(tgtport->dev, - "{%d:%d} Association deleted\n", + pr_info("{%d:%d}: Association deleted\n", tgtport->fc_target_port.port_num, assoc->a_id); + + nvmet_fc_tgtport_put(tgtport); } static struct nvmet_fc_tgt_assoc * @@ -1288,6 +1255,7 @@ nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport, { lockdep_assert_held(&nvmet_fc_tgtlock); + nvmet_fc_tgtport_get(tgtport); pe->tgtport = tgtport; tgtport->pe = pe; @@ -1307,8 +1275,10 @@ nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe) unsigned long flags; spin_lock_irqsave(&nvmet_fc_tgtlock, flags); - if (pe->tgtport) + if (pe->tgtport) { + nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport->pe = NULL; + } list_del(&pe->pe_list); spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -1326,8 +1296,10 @@ nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport) spin_lock_irqsave(&nvmet_fc_tgtlock, flags); pe = tgtport->pe; - if (pe) + if (pe) { + nvmet_fc_tgtport_put(pe->tgtport); pe->tgtport = NULL; + } tgtport->pe = NULL; spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } @@ -1350,6 +1322,9 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) { if (tgtport->fc_target_port.node_name == pe->node_name && tgtport->fc_target_port.port_name == pe->port_name) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + WARN_ON(pe->tgtport); tgtport->pe = pe; pe->tgtport = tgtport; @@ -1362,7 +1337,7 @@ nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport) /** * nvmet_fc_register_targetport - transport entry point called by an * LLDD to register the existence of a local - * NVME subystem FC port. + * NVME subsystem FC port. * @pinfo: pointer to information about the port to be registered * @template: LLDD entrypoints and operational parameters for the port * @dev: physical hardware device node port corresponds to. Will be @@ -1433,7 +1408,6 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo, kref_init(&newrec->ref); ida_init(&newrec->assoc_cnt); newrec->max_sg_cnt = template->max_sgl_segments; - INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); ret = nvmet_fc_alloc_ls_iodlist(newrec); if (ret) { @@ -1469,11 +1443,6 @@ nvmet_fc_free_tgtport(struct kref *ref) struct nvmet_fc_tgtport *tgtport = container_of(ref, struct nvmet_fc_tgtport, ref); struct device *dev = tgtport->dev; - unsigned long flags; - - spin_lock_irqsave(&nvmet_fc_tgtlock, flags); - list_del(&tgtport->tgt_list); - spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_free_ls_iodlist(tgtport); @@ -1619,6 +1588,39 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl) spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); } +static void +nvmet_fc_free_pending_reqs(struct nvmet_fc_tgtport *tgtport) +{ + struct nvmet_fc_ls_req_op *lsop; + struct nvmefc_ls_req *lsreq; + struct nvmet_fc_ls_iod *iod; + int i; + + iod = tgtport->iod; + for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) + cancel_work(&iod->work); + + /* + * After this point the connection is lost and thus any pending + * request can't be processed by the normal completion path. This + * is likely a request from nvmet_fc_send_ls_req_async. 
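For reference, the nvmet_fc_schedule_delete_assoc() change earlier in this file combines a run-once guard with the "undo the reference if queue_work() refuses" idiom. A hedged, standalone sketch of that combination; the demo_* types are invented and the reference here is on the object itself rather than on the tgtport:

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_assoc {
        atomic_t terminating;
        refcount_t ref;
        struct work_struct del_work;
};

static void demo_assoc_put(struct demo_assoc *a)
{
        if (refcount_dec_and_test(&a->ref))
                kfree(a);
}

static void demo_schedule_delete(struct demo_assoc *a)
{
        /* Only the first caller proceeds; atomic_xchg() returns the old
         * value, so a second call sees 1 and bails out. */
        if (atomic_xchg(&a->terminating, 1))
                return;

        refcount_inc(&a->ref);          /* reference handed to del_work */
        if (!queue_work(system_wq, &a->del_work))
                demo_assoc_put(a);      /* work already pending: drop it */
}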
+ */ + while ((lsop = list_first_entry_or_null(&tgtport->ls_req_list, + struct nvmet_fc_ls_req_op, lsreq_list))) { + list_del(&lsop->lsreq_list); + + if (!lsop->req_queued) + continue; + + lsreq = &lsop->ls_req; + fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, + (lsreq->rqstlen + lsreq->rsplen), + DMA_BIDIRECTIONAL); + nvmet_fc_tgtport_put(tgtport); + kfree(lsop); + } +} + /** * nvmet_fc_unregister_targetport - transport entry point called by an * LLDD to deregister/remove a previously @@ -1634,6 +1636,11 @@ int nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) { struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port); + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + list_del(&tgtport->tgt_list); + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_portentry_unbind_tgt(tgtport); @@ -1642,13 +1649,7 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port) flush_workqueue(nvmet_wq); - /* - * should terminate LS's as well. However, LS's will be generated - * at the tail end of association termination, so they likely don't - * exist yet. And even if they did, it's worthwhile to just let - * them finish and targetport ref counting will clean things up. - */ - + nvmet_fc_free_pending_reqs(tgtport); nvmet_fc_tgtport_put(tgtport); return 0; @@ -1711,9 +1712,9 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, } if (ret) { - dev_err(tgtport->dev, - "Create Association LS failed: %s\n", - validation_errors[ret]); + pr_err("{%d}: Create Association LS failed: %s\n", + tgtport->fc_target_port.port_num, + validation_errors[ret]); iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd, FCNVME_RJT_RC_LOGIC, @@ -1725,8 +1726,7 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, atomic_set(&queue->connected, 1); queue->sqhd = 0; /* best place to init value */ - dev_info(tgtport->dev, - "{%d:%d} Association created\n", + pr_info("{%d:%d}: Association created\n", tgtport->fc_target_port.port_num, iod->assoc->a_id); /* format a response */ @@ -1804,9 +1804,9 @@ nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, } if (ret) { - dev_err(tgtport->dev, - "Create Connection LS failed: %s\n", - validation_errors[ret]); + pr_err("{%d}: Create Connection LS failed: %s\n", + tgtport->fc_target_port.port_num, + validation_errors[ret]); iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? @@ -1866,9 +1866,9 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, } if (ret || !assoc) { - dev_err(tgtport->dev, - "Disconnect LS failed: %s\n", - validation_errors[ret]); + pr_err("{%d}: Disconnect LS failed: %s\n", + tgtport->fc_target_port.port_num, + validation_errors[ret]); iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, sizeof(*acc), rqst->w0.ls_cmd, (ret == VERR_NO_ASSOC) ? 
@@ -1902,8 +1902,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, spin_unlock_irqrestore(&tgtport->lock, flags); if (oldls) { - dev_info(tgtport->dev, - "{%d:%d} Multiple Disconnect Association LS's " + pr_info("{%d:%d}: Multiple Disconnect Association LS's " "received\n", tgtport->fc_target_port.port_num, assoc->a_id); /* overwrite good response with bogus failure */ @@ -2046,8 +2045,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf; if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) { - dev_info(tgtport->dev, - "RCV %s LS failed: payload too large (%d)\n", + pr_info("{%d}: RCV %s LS failed: payload too large (%d)\n", + tgtport->fc_target_port.port_num, (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : "", lsreqbuf_len); @@ -2055,8 +2054,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, } if (!nvmet_fc_tgtport_get(tgtport)) { - dev_info(tgtport->dev, - "RCV %s LS failed: target deleting\n", + pr_info("{%d}: RCV %s LS failed: target deleting\n", + tgtport->fc_target_port.port_num, (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : ""); return -ESHUTDOWN; @@ -2064,8 +2063,8 @@ nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port, iod = nvmet_fc_alloc_ls_iod(tgtport); if (!iod) { - dev_info(tgtport->dev, - "RCV %s LS failed: context allocation failed\n", + pr_info("{%d}: RCV %s LS failed: context allocation failed\n", + tgtport->fc_target_port.port_num, (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? nvmefc_ls_names[w0->ls_cmd] : ""); nvmet_fc_tgtport_put(tgtport); @@ -2309,7 +2308,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport, ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); if (ret) { /* - * should be ok to set w/o lock as its in the thread of + * should be ok to set w/o lock as it's in the thread of * execution (not an async timer routine) and doesn't * contend with any clearing action */ @@ -2565,10 +2564,8 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport, fod->data_sg = NULL; fod->data_sg_cnt = 0; - ret = nvmet_req_init(&fod->req, - &fod->queue->nvme_cq, - &fod->queue->nvme_sq, - &nvmet_fc_tgt_fcp_ops); + ret = nvmet_req_init(&fod->req, &fod->queue->nvme_sq, + &nvmet_fc_tgt_fcp_ops); if (!ret) { /* bad SQE content or invalid ctrl state */ /* nvmet layer has already called op done to send rsp. */ @@ -2627,7 +2624,7 @@ transport_error: * and the api of the FC LLDD which may issue a hw command to send the * response, but the LLDD may not get the hw completion for that command * and upcall the nvmet_fc layer before a new command may be - * asynchronously received - its possible for a command to be received + * asynchronously received - it's possible for a command to be received * before the LLDD and nvmet_fc have recycled the job structure. It gives * the appearance of more commands received than fits in the sq. 
* To alleviate this scenario, a temporary queue is maintained in the @@ -2894,12 +2891,17 @@ nvmet_fc_add_port(struct nvmet_port *port) list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) { if ((tgtport->fc_target_port.node_name == traddr.nn) && (tgtport->fc_target_port.port_name == traddr.pn)) { + if (!nvmet_fc_tgtport_get(tgtport)) + continue; + /* a FC port can only be 1 nvmet port id */ if (!tgtport->pe) { nvmet_fc_portentry_bind(tgtport, pe, port); ret = 0; } else ret = -EALREADY; + + nvmet_fc_tgtport_put(tgtport); break; } } @@ -2915,11 +2917,21 @@ static void nvmet_fc_remove_port(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); nvmet_fc_portentry_unbind(pe); - /* terminate any outstanding associations */ - __nvmet_fc_free_assocs(pe->tgtport); + if (tgtport) { + /* terminate any outstanding associations */ + __nvmet_fc_free_assocs(tgtport); + nvmet_fc_tgtport_put(tgtport); + } kfree(pe); } @@ -2928,10 +2940,53 @@ static void nvmet_fc_discovery_chg(struct nvmet_port *port) { struct nvmet_fc_port_entry *pe = port->priv; - struct nvmet_fc_tgtport *tgtport = pe->tgtport; + struct nvmet_fc_tgtport *tgtport = NULL; + unsigned long flags; + + spin_lock_irqsave(&nvmet_fc_tgtlock, flags); + if (pe->tgtport && nvmet_fc_tgtport_get(pe->tgtport)) + tgtport = pe->tgtport; + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); + + if (!tgtport) + return; if (tgtport && tgtport->ops->discovery_event) tgtport->ops->discovery_event(&tgtport->fc_target_port); + + nvmet_fc_tgtport_put(tgtport); +} + +static ssize_t +nvmet_fc_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_size) +{ + struct nvmet_sq *sq = ctrl->sqs[0]; + struct nvmet_fc_tgt_queue *queue = + container_of(sq, struct nvmet_fc_tgt_queue, nvme_sq); + struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; + struct nvmet_fc_hostport *hostport = queue->assoc ? 
queue->assoc->hostport : NULL; + u64 wwnn, wwpn; + ssize_t ret = 0; + + if (!tgtport || !nvmet_fc_tgtport_get(tgtport)) + return -ENODEV; + if (!hostport || !nvmet_fc_hostport_get(hostport)) { + ret = -ENODEV; + goto out_put; + } + + if (tgtport->ops->host_traddr) { + ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); + if (ret) + goto out_put_host; + ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); + } +out_put_host: + nvmet_fc_hostport_put(hostport); +out_put: + nvmet_fc_tgtport_put(tgtport); + return ret; } static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { @@ -2943,6 +2998,7 @@ static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { .queue_response = nvmet_fc_fcp_nvme_cmd_done, .delete_ctrl = nvmet_fc_delete_ctrl, .discovery_chg = nvmet_fc_discovery_chg, + .host_traddr = nvmet_fc_host_traddr, }; static int __init nvmet_fc_init_module(void) diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index 913cd2ec7a6f..c30e9a3e014f 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -207,13 +207,16 @@ static LIST_HEAD(fcloop_nports); struct fcloop_lport { struct nvme_fc_local_port *localport; struct list_head lport_list; - struct completion unreg_done; + refcount_t ref; }; struct fcloop_lport_priv { struct fcloop_lport *lport; }; +/* The port is already being removed, avoid double free */ +#define PORT_DELETED 0 + struct fcloop_rport { struct nvme_fc_remote_port *remoteport; struct nvmet_fc_target_port *targetport; @@ -222,6 +225,7 @@ struct fcloop_rport { spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; + unsigned long flags; }; struct fcloop_tport { @@ -232,6 +236,7 @@ struct fcloop_tport { spinlock_t lock; struct list_head ls_list; struct work_struct ls_work; + unsigned long flags; }; struct fcloop_nport { @@ -239,7 +244,7 @@ struct fcloop_nport { struct fcloop_tport *tport; struct fcloop_lport *lport; struct list_head nport_list; - struct kref ref; + refcount_t ref; u64 node_name; u64 port_name; u32 port_role; @@ -249,7 +254,6 @@ struct fcloop_nport { struct fcloop_lsreq { struct nvmefc_ls_req *lsreq; struct nvmefc_ls_rsp ls_rsp; - int lsdir; /* H2T or T2H */ int status; struct list_head ls_list; /* fcloop_rport->ls_list */ }; @@ -274,7 +278,7 @@ struct fcloop_fcpreq { u32 inistate; bool active; bool aborted; - struct kref ref; + refcount_t ref; struct work_struct fcp_rcv_work; struct work_struct abort_rcv_work; struct work_struct tio_done_work; @@ -287,6 +291,9 @@ struct fcloop_ini_fcpreq { spinlock_t inilock; }; +/* SLAB cache for fcloop_lsreq structures */ +static struct kmem_cache *lsreq_cache; + static inline struct fcloop_lsreq * ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp) { @@ -337,6 +344,7 @@ fcloop_rport_lsrqst_work(struct work_struct *work) * callee may free memory containing tls_req. * do not reference lsreq after this. 
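In this patch fcloop moves its LS request tracking from per-request private space to a dedicated slab cache: the object is allocated on submission and freed once the response work item has run. A minimal sketch of that lifecycle; demo_lsreq is a placeholder, not the real struct fcloop_lsreq layout:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_lsreq {
        void *payload;
        int status;
};

static struct kmem_cache *demo_lsreq_cache;

static int demo_init(void)
{
        demo_lsreq_cache = kmem_cache_create("demo_lsreq",
                                             sizeof(struct demo_lsreq),
                                             0, 0, NULL);
        return demo_lsreq_cache ? 0 : -ENOMEM;
}

static struct demo_lsreq *demo_lsreq_alloc(void)
{
        /* One object per LS request, owned by the request until the
         * response work item runs. */
        return kmem_cache_alloc(demo_lsreq_cache, GFP_KERNEL);
}

static void demo_lsreq_done(struct demo_lsreq *lsreq)
{
        /* Nothing may touch lsreq after this point. */
        kmem_cache_free(demo_lsreq_cache, lsreq);
}

static void demo_exit(void)
{
        kmem_cache_destroy(demo_lsreq_cache);
}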
*/ + kmem_cache_free(lsreq_cache, tls_req); spin_lock(&rport->lock); } @@ -348,10 +356,13 @@ fcloop_h2t_ls_req(struct nvme_fc_local_port *localport, struct nvme_fc_remote_port *remoteport, struct nvmefc_ls_req *lsreq) { - struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_rport *rport = remoteport->private; + struct fcloop_lsreq *tls_req; int ret = 0; + tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL); + if (!tls_req) + return -ENOMEM; tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); @@ -388,14 +399,17 @@ fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport, lsrsp->done(lsrsp); - if (remoteport) { - rport = remoteport->private; - spin_lock(&rport->lock); - list_add_tail(&tls_req->ls_list, &rport->ls_list); - spin_unlock(&rport->lock); - queue_work(nvmet_wq, &rport->ls_work); + if (!remoteport) { + kmem_cache_free(lsreq_cache, tls_req); + return 0; } + rport = remoteport->private; + spin_lock(&rport->lock); + list_add_tail(&tls_req->ls_list, &rport->ls_list); + spin_unlock(&rport->lock); + queue_work(nvmet_wq, &rport->ls_work); + return 0; } @@ -421,6 +435,7 @@ fcloop_tport_lsrqst_work(struct work_struct *work) * callee may free memory containing tls_req. * do not reference lsreq after this. */ + kmem_cache_free(lsreq_cache, tls_req); spin_lock(&tport->lock); } @@ -431,8 +446,8 @@ static int fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, struct nvmefc_ls_req *lsreq) { - struct fcloop_lsreq *tls_req = lsreq->private; struct fcloop_tport *tport = targetport->private; + struct fcloop_lsreq *tls_req; int ret = 0; /* @@ -440,6 +455,10 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, * hosthandle ignored as fcloop currently is * 1:1 tgtport vs remoteport */ + + tls_req = kmem_cache_alloc(lsreq_cache, GFP_KERNEL); + if (!tls_req) + return -ENOMEM; tls_req->lsreq = lsreq; INIT_LIST_HEAD(&tls_req->ls_list); @@ -456,6 +475,9 @@ fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle, ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp, lsreq->rqstaddr, lsreq->rqstlen); + if (ret) + kmem_cache_free(lsreq_cache, tls_req); + return ret; } @@ -470,18 +492,32 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, struct nvmet_fc_target_port *targetport = rport->targetport; struct fcloop_tport *tport; + if (!targetport) { + /* + * The target port is gone. The target doesn't expect any + * response anymore and thus lsreq can't be accessed anymore. + * + * We end up here from delete association exchange: + * nvmet_fc_xmt_disconnect_assoc sends an async request. + * + * Return success because this is what LLDDs do; silently + * drop the response. + */ + lsrsp->done(lsrsp); + kmem_cache_free(lsreq_cache, tls_req); + return 0; + } + memcpy(lsreq->rspaddr, lsrsp->rspbuf, ((lsreq->rsplen < lsrsp->rsplen) ? 
lsreq->rsplen : lsrsp->rsplen)); lsrsp->done(lsrsp); - if (targetport) { - tport = targetport->private; - spin_lock(&tport->lock); - list_add_tail(&tport->ls_list, &tls_req->ls_list); - spin_unlock(&tport->lock); - queue_work(nvmet_wq, &tport->ls_work); - } + tport = targetport->private; + spin_lock(&tport->lock); + list_add_tail(&tls_req->ls_list, &tport->ls_list); + spin_unlock(&tport->lock); + queue_work(nvmet_wq, &tport->ls_work); return 0; } @@ -492,6 +528,16 @@ fcloop_t2h_host_release(void *hosthandle) /* host handle ignored for now */ } +static int +fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn) +{ + struct fcloop_rport *rport = hosthandle; + + *wwnn = rport->lport->localport->node_name; + *wwpn = rport->lport->localport->port_name; + return 0; +} + /* * Simulate reception of RSCN and converting it to a initiator transport * call to rescan a remote port. @@ -524,24 +570,18 @@ fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport) } static void -fcloop_tfcp_req_free(struct kref *ref) +fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) { - struct fcloop_fcpreq *tfcp_req = - container_of(ref, struct fcloop_fcpreq, ref); + if (!refcount_dec_and_test(&tfcp_req->ref)) + return; kfree(tfcp_req); } -static void -fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) -{ - kref_put(&tfcp_req->ref, fcloop_tfcp_req_free); -} - static int fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req) { - return kref_get_unless_zero(&tfcp_req->ref); + return refcount_inc_not_zero(&tfcp_req->ref); } static void @@ -561,7 +601,8 @@ fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq, } /* release original io reference on tgt struct */ - fcloop_tfcp_req_put(tfcp_req); + if (tfcp_req) + fcloop_tfcp_req_put(tfcp_req); } static bool drop_fabric_opcode; @@ -613,12 +654,13 @@ fcloop_fcp_recv_work(struct work_struct *work) { struct fcloop_fcpreq *tfcp_req = container_of(work, struct fcloop_fcpreq, fcp_rcv_work); - struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; + struct nvmefc_fcp_req *fcpreq; unsigned long flags; int ret = 0; bool aborted = false; spin_lock_irqsave(&tfcp_req->reqlock, flags); + fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_START: tfcp_req->inistate = INI_IO_ACTIVE; @@ -633,16 +675,19 @@ fcloop_fcp_recv_work(struct work_struct *work) } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - if (unlikely(aborted)) - ret = -ECANCELED; - else { - if (likely(!check_for_drop(tfcp_req))) - ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, - &tfcp_req->tgt_fcp_req, - fcpreq->cmdaddr, fcpreq->cmdlen); - else - pr_info("%s: dropped command ********\n", __func__); + if (unlikely(aborted)) { + /* the abort handler will call fcloop_call_host_done */ + return; } + + if (unlikely(check_for_drop(tfcp_req))) { + pr_info("%s: dropped command ********\n", __func__); + return; + } + + ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, + &tfcp_req->tgt_fcp_req, + fcpreq->cmdaddr, fcpreq->cmdlen); if (ret) fcloop_call_host_done(fcpreq, tfcp_req, ret); } @@ -657,15 +702,17 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) unsigned long flags; spin_lock_irqsave(&tfcp_req->reqlock, flags); - fcpreq = tfcp_req->fcpreq; switch (tfcp_req->inistate) { case INI_IO_ABORTED: + fcpreq = tfcp_req->fcpreq; + tfcp_req->fcpreq = NULL; break; case INI_IO_COMPLETED: completed = true; break; default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); + fcloop_tfcp_req_put(tfcp_req); WARN_ON(1); return; } @@ -681,10 +728,6 @@ fcloop_fcp_abort_recv_work(struct work_struct *work) 
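Several fcloop objects (tfcp_req here, nport and lport further down) switch from struct kref to plain refcount_t. The resulting get/put pair looks like this standalone sketch, with demo_obj standing in for the real structures:

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
        refcount_t ref;
};

static struct demo_obj *demo_obj_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                refcount_set(&obj->ref, 1);     /* creator's reference */
        return obj;
}

static bool demo_obj_get(struct demo_obj *obj)
{
        /* Refuse to resurrect an object whose count already hit zero. */
        return refcount_inc_not_zero(&obj->ref);
}

static void demo_obj_put(struct demo_obj *obj)
{
        if (refcount_dec_and_test(&obj->ref))
                kfree(obj);
}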
nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, &tfcp_req->tgt_fcp_req); - spin_lock_irqsave(&tfcp_req->reqlock, flags); - tfcp_req->fcpreq = NULL; - spin_unlock_irqrestore(&tfcp_req->reqlock, flags); - fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); /* call_host_done releases reference for abort downcall */ } @@ -738,7 +781,7 @@ fcloop_fcp_req(struct nvme_fc_local_port *localport, INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work); INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work); INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); - kref_init(&tfcp_req->ref); + refcount_set(&tfcp_req->ref, 1); queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work); @@ -953,13 +996,16 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, spin_lock(&inireq->inilock); tfcp_req = inireq->tfcp_req; - if (tfcp_req) - fcloop_tfcp_req_get(tfcp_req); + if (tfcp_req) { + if (!fcloop_tfcp_req_get(tfcp_req)) + tfcp_req = NULL; + } spin_unlock(&inireq->inilock); - if (!tfcp_req) + if (!tfcp_req) { /* abort has already been called */ - return; + goto out_host_done; + } /* break initiator/target relationship for io */ spin_lock_irqsave(&tfcp_req->reqlock, flags); @@ -974,7 +1020,7 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, default: spin_unlock_irqrestore(&tfcp_req->reqlock, flags); WARN_ON(1); - return; + goto out_host_done; } spin_unlock_irqrestore(&tfcp_req->reqlock, flags); @@ -988,27 +1034,56 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport, */ fcloop_tfcp_req_put(tfcp_req); } + + return; + +out_host_done: + fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); } static void -fcloop_nport_free(struct kref *ref) +fcloop_lport_put(struct fcloop_lport *lport) { - struct fcloop_nport *nport = - container_of(ref, struct fcloop_nport, ref); + unsigned long flags; - kfree(nport); + if (!refcount_dec_and_test(&lport->ref)) + return; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&lport->lport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + kfree(lport); +} + +static int +fcloop_lport_get(struct fcloop_lport *lport) +{ + return refcount_inc_not_zero(&lport->ref); } static void fcloop_nport_put(struct fcloop_nport *nport) { - kref_put(&nport->ref, fcloop_nport_free); + unsigned long flags; + + if (!refcount_dec_and_test(&nport->ref)) + return; + + spin_lock_irqsave(&fcloop_lock, flags); + list_del(&nport->nport_list); + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (nport->lport) + fcloop_lport_put(nport->lport); + + kfree(nport); } static int fcloop_nport_get(struct fcloop_nport *nport) { - return kref_get_unless_zero(&nport->ref); + return refcount_inc_not_zero(&nport->ref); } static void @@ -1017,26 +1092,49 @@ fcloop_localport_delete(struct nvme_fc_local_port *localport) struct fcloop_lport_priv *lport_priv = localport->private; struct fcloop_lport *lport = lport_priv->lport; - /* release any threads waiting for the unreg to complete */ - complete(&lport->unreg_done); + fcloop_lport_put(lport); } static void fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport) { struct fcloop_rport *rport = remoteport->private; + bool put_port = false; + unsigned long flags; flush_work(&rport->ls_work); - fcloop_nport_put(rport->nport); + + spin_lock_irqsave(&fcloop_lock, flags); + if (!test_and_set_bit(PORT_DELETED, &rport->flags)) + put_port = true; + rport->nport->rport = NULL; + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (put_port) { + WARN_ON(!list_empty(&rport->ls_list)); + fcloop_nport_put(rport->nport); + } } static void 
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport) { struct fcloop_tport *tport = targetport->private; + bool put_port = false; + unsigned long flags; flush_work(&tport->ls_work); - fcloop_nport_put(tport->nport); + + spin_lock_irqsave(&fcloop_lock, flags); + if (!test_and_set_bit(PORT_DELETED, &tport->flags)) + put_port = true; + tport->nport->tport = NULL; + spin_unlock_irqrestore(&fcloop_lock, flags); + + if (put_port) { + WARN_ON(!list_empty(&tport->ls_list)); + fcloop_nport_put(tport->nport); + } } #define FCLOOP_HW_QUEUES 4 @@ -1060,7 +1158,6 @@ static struct nvme_fc_port_template fctemplate = { /* sizes of additional private data for data structures */ .local_priv_sz = sizeof(struct fcloop_lport_priv), .remote_priv_sz = sizeof(struct fcloop_rport), - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq), }; @@ -1074,6 +1171,7 @@ static struct nvmet_fc_target_template tgttemplate = { .ls_req = fcloop_t2h_ls_req, .ls_abort = fcloop_t2h_ls_abort, .host_release = fcloop_t2h_host_release, + .host_traddr = fcloop_t2h_host_traddr, .max_hw_queues = FCLOOP_HW_QUEUES, .max_sgl_segments = FCLOOP_SGL_SEGS, .max_dif_sgl_segments = FCLOOP_SGL_SEGS, @@ -1082,7 +1180,6 @@ static struct nvmet_fc_target_template tgttemplate = { .target_features = 0, /* sizes of additional private data for data structures */ .target_priv_sz = sizeof(struct fcloop_tport), - .lsrqst_priv_sz = sizeof(struct fcloop_lsreq), }; static ssize_t @@ -1129,6 +1226,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr, lport->localport = localport; INIT_LIST_HEAD(&lport->lport_list); + refcount_set(&lport->ref, 1); spin_lock_irqsave(&fcloop_lock, flags); list_add_tail(&lport->lport_list, &fcloop_lports); @@ -1145,60 +1243,94 @@ out_free_lport: return ret ? 
ret : count; } +static int +__localport_unreg(struct fcloop_lport *lport) +{ + return nvme_fc_unregister_localport(lport->localport); +} -static void -__unlink_local_port(struct fcloop_lport *lport) +static struct fcloop_nport * +__fcloop_nport_lookup(u64 node_name, u64 port_name) { - list_del(&lport->lport_list); + struct fcloop_nport *nport; + + list_for_each_entry(nport, &fcloop_nports, nport_list) { + if (nport->node_name != node_name || + nport->port_name != port_name) + continue; + + if (fcloop_nport_get(nport)) + return nport; + + break; + } + + return NULL; } -static int -__wait_localport_unreg(struct fcloop_lport *lport) +static struct fcloop_nport * +fcloop_nport_lookup(u64 node_name, u64 port_name) { - int ret; + struct fcloop_nport *nport; + unsigned long flags; - init_completion(&lport->unreg_done); + spin_lock_irqsave(&fcloop_lock, flags); + nport = __fcloop_nport_lookup(node_name, port_name); + spin_unlock_irqrestore(&fcloop_lock, flags); - ret = nvme_fc_unregister_localport(lport->localport); + return nport; +} - if (!ret) - wait_for_completion(&lport->unreg_done); +static struct fcloop_lport * +__fcloop_lport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_lport *lport; - kfree(lport); + list_for_each_entry(lport, &fcloop_lports, lport_list) { + if (lport->localport->node_name != node_name || + lport->localport->port_name != port_name) + continue; - return ret; + if (fcloop_lport_get(lport)) + return lport; + + break; + } + + return NULL; } +static struct fcloop_lport * +fcloop_lport_lookup(u64 node_name, u64 port_name) +{ + struct fcloop_lport *lport; + unsigned long flags; + + spin_lock_irqsave(&fcloop_lock, flags); + lport = __fcloop_lport_lookup(node_name, port_name); + spin_unlock_irqrestore(&fcloop_lock, flags); + + return lport; +} static ssize_t fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_lport *tlport, *lport = NULL; + struct fcloop_lport *lport; u64 nodename, portname; - unsigned long flags; int ret; ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf); if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tlport, &fcloop_lports, lport_list) { - if (tlport->localport->node_name == nodename && - tlport->localport->port_name == portname) { - lport = tlport; - __unlink_local_port(lport); - break; - } - } - spin_unlock_irqrestore(&fcloop_lock, flags); - + lport = fcloop_lport_lookup(nodename, portname); if (!lport) return -ENOENT; - ret = __wait_localport_unreg(lport); + ret = __localport_unreg(lport); + fcloop_lport_put(lport); return ret ? ret : count; } @@ -1206,8 +1338,8 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr, static struct fcloop_nport * fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) { - struct fcloop_nport *newnport, *nport = NULL; - struct fcloop_lport *tmplport, *lport = NULL; + struct fcloop_nport *newnport, *nport; + struct fcloop_lport *lport; struct fcloop_ctrl_options *opts; unsigned long flags; u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS; @@ -1222,10 +1354,8 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) goto out_free_opts; /* everything there ? 
*/ - if ((opts->mask & opts_mask) != opts_mask) { - ret = -EINVAL; + if ((opts->mask & opts_mask) != opts_mask) goto out_free_opts; - } newnport = kzalloc(sizeof(*newnport), GFP_KERNEL); if (!newnport) @@ -1238,63 +1368,64 @@ fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) newnport->port_role = opts->roles; if (opts->mask & NVMF_OPT_FCADDR) newnport->port_id = opts->fcaddr; - kref_init(&newnport->ref); + refcount_set(&newnport->ref, 1); spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmplport, &fcloop_lports, lport_list) { - if (tmplport->localport->node_name == opts->wwnn && - tmplport->localport->port_name == opts->wwpn) - goto out_invalid_opts; - - if (tmplport->localport->node_name == opts->lpwwnn && - tmplport->localport->port_name == opts->lpwwpn) - lport = tmplport; + lport = __fcloop_lport_lookup(opts->wwnn, opts->wwpn); + if (lport) { + /* invalid configuration */ + fcloop_lport_put(lport); + goto out_free_newnport; } if (remoteport) { - if (!lport) - goto out_invalid_opts; - newnport->lport = lport; - } - - list_for_each_entry(nport, &fcloop_nports, nport_list) { - if (nport->node_name == opts->wwnn && - nport->port_name == opts->wwpn) { - if ((remoteport && nport->rport) || - (!remoteport && nport->tport)) { - nport = NULL; - goto out_invalid_opts; - } - - fcloop_nport_get(nport); - - spin_unlock_irqrestore(&fcloop_lock, flags); - - if (remoteport) - nport->lport = lport; - if (opts->mask & NVMF_OPT_ROLES) - nport->port_role = opts->roles; - if (opts->mask & NVMF_OPT_FCADDR) - nport->port_id = opts->fcaddr; + lport = __fcloop_lport_lookup(opts->lpwwnn, opts->lpwwpn); + if (!lport) { + /* invalid configuration */ goto out_free_newnport; } } - list_add_tail(&newnport->nport_list, &fcloop_nports); + nport = __fcloop_nport_lookup(opts->wwnn, opts->wwpn); + if (nport) { + if ((remoteport && nport->rport) || + (!remoteport && nport->tport)) { + /* invalid configuration */ + goto out_put_nport; + } + /* found existing nport, discard the new nport */ + kfree(newnport); + } else { + list_add_tail(&newnport->nport_list, &fcloop_nports); + nport = newnport; + } + + if (opts->mask & NVMF_OPT_ROLES) + nport->port_role = opts->roles; + if (opts->mask & NVMF_OPT_FCADDR) + nport->port_id = opts->fcaddr; + if (lport) { + if (!nport->lport) + nport->lport = lport; + else + fcloop_lport_put(lport); + } spin_unlock_irqrestore(&fcloop_lock, flags); kfree(opts); - return newnport; + return nport; -out_invalid_opts: - spin_unlock_irqrestore(&fcloop_lock, flags); +out_put_nport: + if (lport) + fcloop_lport_put(lport); + fcloop_nport_put(nport); out_free_newnport: + spin_unlock_irqrestore(&fcloop_lock, flags); kfree(newnport); out_free_opts: kfree(opts); - return nport; + return NULL; } static ssize_t @@ -1335,6 +1466,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr, rport->nport = nport; rport->lport = nport->lport; nport->rport = rport; + rport->flags = 0; spin_lock_init(&rport->lock); INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work); INIT_LIST_HEAD(&rport->ls_list); @@ -1348,21 +1480,18 @@ __unlink_remote_port(struct fcloop_nport *nport) { struct fcloop_rport *rport = nport->rport; + lockdep_assert_held(&fcloop_lock); + if (rport && nport->tport) nport->tport->remoteport = NULL; nport->rport = NULL; - list_del(&nport->nport_list); - return rport; } static int __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) { - if (!rport) - return -EALREADY; - return nvme_fc_unregister_remoteport(rport->remoteport); } @@ 
-1370,8 +1499,8 @@ static ssize_t fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_nport *nport = NULL, *tmpport; - static struct fcloop_rport *rport; + struct fcloop_nport *nport; + struct fcloop_rport *rport; u64 nodename, portname; unsigned long flags; int ret; @@ -1380,24 +1509,24 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr, if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmpport, &fcloop_nports, nport_list) { - if (tmpport->node_name == nodename && - tmpport->port_name == portname && tmpport->rport) { - nport = tmpport; - rport = __unlink_remote_port(nport); - break; - } - } + nport = fcloop_nport_lookup(nodename, portname); + if (!nport) + return -ENOENT; + spin_lock_irqsave(&fcloop_lock, flags); + rport = __unlink_remote_port(nport); spin_unlock_irqrestore(&fcloop_lock, flags); - if (!nport) - return -ENOENT; + if (!rport) { + ret = -ENOENT; + goto out_nport_put; + } ret = __remoteport_unreg(nport, rport); +out_nport_put: + fcloop_nport_put(nport); + return ret ? ret : count; } @@ -1435,6 +1564,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr, tport->nport = nport; tport->lport = nport->lport; nport->tport = tport; + tport->flags = 0; spin_lock_init(&tport->lock); INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work); INIT_LIST_HEAD(&tport->ls_list); @@ -1448,6 +1578,8 @@ __unlink_target_port(struct fcloop_nport *nport) { struct fcloop_tport *tport = nport->tport; + lockdep_assert_held(&fcloop_lock); + if (tport && nport->rport) nport->rport->targetport = NULL; nport->tport = NULL; @@ -1458,9 +1590,6 @@ __unlink_target_port(struct fcloop_nport *nport) static int __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) { - if (!tport) - return -EALREADY; - return nvmet_fc_unregister_targetport(tport->targetport); } @@ -1468,8 +1597,8 @@ static ssize_t fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct fcloop_nport *nport = NULL, *tmpport; - struct fcloop_tport *tport = NULL; + struct fcloop_nport *nport; + struct fcloop_tport *tport; u64 nodename, portname; unsigned long flags; int ret; @@ -1478,24 +1607,24 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr, if (ret) return ret; - spin_lock_irqsave(&fcloop_lock, flags); - - list_for_each_entry(tmpport, &fcloop_nports, nport_list) { - if (tmpport->node_name == nodename && - tmpport->port_name == portname && tmpport->tport) { - nport = tmpport; - tport = __unlink_target_port(nport); - break; - } - } + nport = fcloop_nport_lookup(nodename, portname); + if (!nport) + return -ENOENT; + spin_lock_irqsave(&fcloop_lock, flags); + tport = __unlink_target_port(nport); spin_unlock_irqrestore(&fcloop_lock, flags); - if (!nport) - return -ENOENT; + if (!tport) { + ret = -ENOENT; + goto out_nport_put; + } ret = __targetport_unreg(nport, tport); +out_nport_put: + fcloop_nport_put(nport); + return ret ? 
ret : count; } @@ -1561,15 +1690,20 @@ static const struct class fcloop_class = { }; static struct device *fcloop_device; - static int __init fcloop_init(void) { int ret; + lsreq_cache = kmem_cache_create("lsreq_cache", + sizeof(struct fcloop_lsreq), 0, + 0, NULL); + if (!lsreq_cache) + return -ENOMEM; + ret = class_register(&fcloop_class); if (ret) { pr_err("couldn't register class fcloop\n"); - return ret; + goto out_destroy_cache; } fcloop_device = device_create_with_groups( @@ -1587,13 +1721,15 @@ static int __init fcloop_init(void) out_destroy_class: class_unregister(&fcloop_class); +out_destroy_cache: + kmem_cache_destroy(lsreq_cache); return ret; } static void __exit fcloop_exit(void) { - struct fcloop_lport *lport = NULL; - struct fcloop_nport *nport = NULL; + struct fcloop_lport *lport; + struct fcloop_nport *nport; struct fcloop_tport *tport; struct fcloop_rport *rport; unsigned long flags; @@ -1604,7 +1740,7 @@ static void __exit fcloop_exit(void) for (;;) { nport = list_first_entry_or_null(&fcloop_nports, typeof(*nport), nport_list); - if (!nport) + if (!nport || !fcloop_nport_get(nport)) break; tport = __unlink_target_port(nport); @@ -1612,13 +1748,21 @@ static void __exit fcloop_exit(void) spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __targetport_unreg(nport, tport); - if (ret) - pr_warn("%s: Failed deleting target port\n", __func__); + if (tport) { + ret = __targetport_unreg(nport, tport); + if (ret) + pr_warn("%s: Failed deleting target port\n", + __func__); + } - ret = __remoteport_unreg(nport, rport); - if (ret) - pr_warn("%s: Failed deleting remote port\n", __func__); + if (rport) { + ret = __remoteport_unreg(nport, rport); + if (ret) + pr_warn("%s: Failed deleting remote port\n", + __func__); + } + + fcloop_nport_put(nport); spin_lock_irqsave(&fcloop_lock, flags); } @@ -1626,17 +1770,17 @@ static void __exit fcloop_exit(void) for (;;) { lport = list_first_entry_or_null(&fcloop_lports, typeof(*lport), lport_list); - if (!lport) + if (!lport || !fcloop_lport_get(lport)) break; - __unlink_local_port(lport); - spin_unlock_irqrestore(&fcloop_lock, flags); - ret = __wait_localport_unreg(lport); + ret = __localport_unreg(lport); if (ret) pr_warn("%s: Failed deleting local port\n", __func__); + fcloop_lport_put(lport); + spin_lock_irqsave(&fcloop_lock, flags); } @@ -1646,6 +1790,7 @@ static void __exit fcloop_exit(void) device_destroy(&fcloop_class, MKDEV(0, 0)); class_unregister(&fcloop_class); + kmem_cache_destroy(lsreq_cache); } module_init(fcloop_init); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 6426aac2634a..8d246b8ca604 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) */ id->nsfeat |= 1 << 4; /* NPWG = Namespace Preferred Write Granularity. 0's based */ - id->npwg = lpp0b; + id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev)); /* NPWA = Namespace Preferred Write Alignment. 0's based */ id->npwa = id->npwg; /* NPDG = Namespace Preferred Deallocate Granularity. 
0's based */ @@ -46,6 +46,10 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) id->npda = id->npdg; /* NOWS = Namespace Optimal Write Size */ id->nows = to0based(bdev_io_opt(bdev) / bdev_logical_block_size(bdev)); + + /* Set WZDS and DRB if device supports unmapped write zeroes */ + if (bdev_write_zeroes_unmap_sectors(bdev)) + id->dlfeat = (1 << 3) | 0x1; } void nvmet_bdev_ns_disable(struct nvmet_ns *ns) @@ -61,15 +65,17 @@ static void nvmet_bdev_ns_enable_integrity(struct nvmet_ns *ns) { struct blk_integrity *bi = bdev_get_integrity(ns->bdev); - if (bi) { - ns->metadata_size = bi->tuple_size; - if (bi->profile == &t10_pi_type1_crc) + if (!bi) + return; + + if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC) { + ns->metadata_size = bi->metadata_size; + if (bi->flags & BLK_INTEGRITY_REF_TAG) ns->pi_type = NVME_NS_DPS_PI_TYPE1; - else if (bi->profile == &t10_pi_type3_crc) - ns->pi_type = NVME_NS_DPS_PI_TYPE3; else - /* Unsupported metadata type */ - ns->metadata_size = 0; + ns->pi_type = NVME_NS_DPS_PI_TYPE3; + } else { + ns->metadata_size = 0; } } @@ -102,7 +108,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns) ns->pi_type = 0; ns->metadata_size = 0; - if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY_T10)) + if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) nvmet_bdev_ns_enable_integrity(ns); if (bdev_is_zoned(ns->bdev)) { @@ -131,27 +137,20 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) * Right now there exists M : 1 mapping between block layer error * to the NVMe status code (see nvme_error_status()). For consistency, * when we reverse map we use most appropriate NVMe Status code from - * the group of the NVMe staus codes used in the nvme_error_status(). + * the group of the NVMe status codes used in the nvme_error_status(). 
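Throughout this series the DNR flag is spelled NVME_STATUS_DNR instead of NVME_SC_DNR, but the reporting pattern itself is unchanged: pick a status code, OR in the Do Not Retry bit, and record the offending field offset in error_loc. A hedged sketch assuming the nvmet target headers; demo_check() and its opcode test are invented, not a real nvmet hook:

static u16 demo_check(struct nvmet_req *req)
{
        /* 0xff is just a stand-in for "an opcode this target rejects". */
        if (req->cmd->common.opcode == 0xff) {
                req->error_loc =
                        offsetof(struct nvme_common_command, opcode);
                return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR;
        }
        return 0;       /* success */
}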
*/ switch (blk_sts) { case BLK_STS_NOSPC: - status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR; + status = NVME_SC_CAP_EXCEEDED | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, length); break; case BLK_STS_TARGET: - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_rw_command, slba); break; case BLK_STS_NOTSUPP: + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); - switch (req->cmd->common.opcode) { - case nvme_cmd_dsm: - case nvme_cmd_write_zeroes: - status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR; - break; - default: - status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR; - } break; case BLK_STS_MEDIUM: status = NVME_SC_ACCESS_DENIED; @@ -159,7 +158,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) break; case BLK_STS_IOERR: default: - status = NVME_SC_INTERNAL | NVME_SC_DNR; + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_common_command, opcode); } @@ -270,6 +269,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) iter_flags = SG_MITER_FROM_SG; } + if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR)) + opf |= REQ_FAILFAST_DEV; + if (is_pci_p2pdma_page(sg_page(req->sg))) opf |= REQ_NOMERGE; @@ -356,7 +358,7 @@ u16 nvmet_bdev_flush(struct nvmet_req *req) return 0; if (blkdev_issue_flush(req->ns->bdev)) - return NVME_SC_INTERNAL | NVME_SC_DNR; + return NVME_SC_INTERNAL | NVME_STATUS_DNR; return 0; } diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index e589915ddef8..fc8e7c9ad858 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -33,10 +33,12 @@ struct nvme_loop_ctrl { struct list_head list; struct blk_mq_tag_set tag_set; - struct nvme_loop_iod async_event_iod; struct nvme_ctrl ctrl; struct nvmet_port *port; + + /* Must be last --ends in a flexible-array member. */ + struct nvme_loop_iod async_event_iod; }; static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) @@ -148,8 +150,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, nvme_start_request(req); iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; iod->req.port = queue->ctrl->port; - if (!nvmet_req_init(&iod->req, &queue->nvme_cq, - &queue->nvme_sq, &nvme_loop_ops)) + if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) return BLK_STS_OK; if (blk_rq_nr_phys_segments(req)) { @@ -162,7 +163,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx, } iod->req.sg = iod->sg_table.sgl; - iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl); + iod->req.sg_cnt = blk_rq_map_sg(req, iod->sg_table.sgl); iod->req.transfer_len = blk_rq_payload_bytes(req); } @@ -181,8 +182,7 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg) iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH; iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; - if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq, - &nvme_loop_ops)) { + if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) { dev_err(ctrl->ctrl.device, "failed async event work\n"); return; } @@ -265,7 +265,15 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) { if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) return; + /* + * It's possible that some requests might have been added + * after admin queue is stopped/quiesced. So now start the + * queue to flush these requests to the completion. 
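 * Editor's aside (not part of this patch): the teardown below also reflects
 * the completion queue reference counting introduced in this series. Judging
 * by the error paths in this hunk, nvmet_cq_init() leaves the caller holding
 * a reference that must be dropped with nvmet_cq_put() once the submission
 * queue is gone. The setup side pairs up roughly as follows, where "queue"
 * is an illustrative shorthand for one of ctrl->queues[]:
 *
 *	nvmet_cq_init(&queue->nvme_cq);
 *	ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq);
 *	if (ret)
 *		nvmet_cq_put(&queue->nvme_cq);
 *
 * The I/O queue setup and teardown later in this hunk follow the same pairing.
 *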
+ */ + nvme_unquiesce_admin_queue(&ctrl->ctrl); + nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + nvmet_cq_put(&ctrl->queues[0].nvme_cq); nvme_remove_admin_tag_set(&ctrl->ctrl); } @@ -295,8 +303,15 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) for (i = 1; i < ctrl->ctrl.queue_count; i++) { clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); + nvmet_cq_put(&ctrl->queues[i].nvme_cq); } ctrl->ctrl.queue_count = 1; + /* + * It's possible that some requests might have been added + * after io queue is stopped/quiesced. So now start the + * queue to flush these requests to the completion. + */ + nvme_unquiesce_io_queues(&ctrl->ctrl); } static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) @@ -314,9 +329,13 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) for (i = 1; i <= nr_io_queues; i++) { ctrl->queues[i].ctrl = ctrl; - ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); - if (ret) + nvmet_cq_init(&ctrl->queues[i].nvme_cq); + ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq, + &ctrl->queues[i].nvme_cq); + if (ret) { + nvmet_cq_put(&ctrl->queues[i].nvme_cq); goto out_destroy_queues; + } ctrl->ctrl.queue_count++; } @@ -347,9 +366,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) int error; ctrl->queues[0].ctrl = ctrl; - error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); - if (error) + nvmet_cq_init(&ctrl->queues[0].nvme_cq); + error = nvmet_sq_init(&ctrl->queues[0].nvme_sq, + &ctrl->queues[0].nvme_cq); + if (error) { + nvmet_cq_put(&ctrl->queues[0].nvme_cq); return error; + } ctrl->ctrl.queue_count = 1; error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set, @@ -388,6 +411,7 @@ out_cleanup_tagset: nvme_remove_admin_tag_set(&ctrl->ctrl); out_free_sq: nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); + nvmet_cq_put(&ctrl->queues[0].nvme_cq); return error; } @@ -487,6 +511,7 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { .submit_async_event = nvme_loop_submit_async_event, .delete_ctrl = nvme_loop_delete_ctrl_host, .get_address = nvmf_get_address, + .get_virt_boundary = nvme_get_virt_boundary, }; static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) @@ -555,6 +580,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, goto out; } + ret = nvme_add_ctrl(&ctrl->ctrl); + if (ret) + goto out_put_ctrl; + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) WARN_ON_ONCE(1); @@ -611,6 +640,7 @@ out_free_queues: kfree(ctrl->queues); out_uninit_ctrl: nvme_uninit_ctrl(&ctrl->ctrl); +out_put_ctrl: nvme_put_ctrl(&ctrl->ctrl); out: if (ret > 0) diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 2f22b07eab29..b664b584fdc8 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -20,9 +20,11 @@ #include <linux/blkdev.h> #include <linux/radix-tree.h> #include <linux/t10-pi.h> +#include <linux/kfifo.h> -#define NVMET_DEFAULT_VS NVME_VS(1, 3, 0) +#define NVMET_DEFAULT_VS NVME_VS(2, 1, 0) +#define NVMET_NS_ENABLED XA_MARK_1 #define NVMET_ASYNC_EVENTS 4 #define NVMET_ERROR_LOG_SLOTS 128 #define NVMET_NO_ERROR_LOC ((u16)-1) @@ -30,6 +32,13 @@ #define NVMET_MN_MAX_SIZE 40 #define NVMET_SN_MAX_SIZE 20 #define NVMET_FR_MAX_SIZE 8 +#define NVMET_PR_LOG_QUEUE_SIZE 64 + +#define nvmet_for_each_ns(xa, index, entry) \ + xa_for_each(xa, index, entry) + +#define nvmet_for_each_enabled_ns(xa, index, entry) \ + xa_for_each_marked(xa, index, entry, NVMET_NS_ENABLED) /* * Supported optional AENs: @@ -56,6 +65,38 @@ 
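Editor's aside (not part of this patch): the nvmet_for_each_enabled_ns() macro defined a few lines above wraps xa_for_each_marked() so that callers only visit namespaces carrying the NVMET_NS_ENABLED mark. A hedged sketch of a caller, assuming the subsystem's namespace xarray is the "namespaces" member as in mainline nvmet:

	/* Illustrative only: count the namespaces currently enabled. */
	static unsigned long nvmet_count_enabled_ns(struct nvmet_subsys *subsys)
	{
		struct nvmet_ns *ns;
		unsigned long idx, count = 0;

		nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns)
			count++;

		return count;
	}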
#define IPO_IATTR_CONNECT_SQE(x) \ (cpu_to_le32(offsetof(struct nvmf_connect_command, x))) +struct nvmet_pr_registrant { + u64 rkey; + uuid_t hostid; + enum nvme_pr_type rtype; + struct list_head entry; + struct rcu_head rcu; +}; + +struct nvmet_pr { + bool enable; + unsigned long notify_mask; + atomic_t generation; + struct nvmet_pr_registrant __rcu *holder; + /* + * During the execution of the reservation command, mutual + * exclusion is required throughout the process. However, + * while waiting asynchronously for the 'per controller + * percpu_ref' to complete before the 'preempt and abort' + * command finishes, a semaphore is needed to ensure mutual + * exclusion instead of a mutex. + */ + struct semaphore pr_sem; + struct list_head registrant_list; +}; + +struct nvmet_pr_per_ctrl_ref { + struct percpu_ref ref; + struct completion free_done; + struct completion confirm_done; + uuid_t hostid; +}; + struct nvmet_ns { struct percpu_ref ref; struct file *bdev_file; @@ -85,6 +126,8 @@ struct nvmet_ns { int pi_type; int metadata_size; u8 csi; + struct nvmet_pr pr; + struct xarray pr_per_ctrl_refs; }; static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item) @@ -98,13 +141,16 @@ static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns) } struct nvmet_cq { + struct nvmet_ctrl *ctrl; u16 qid; u16 size; + refcount_t ref; }; struct nvmet_sq { struct nvmet_ctrl *ctrl; struct percpu_ref ref; + struct nvmet_cq *cq; u16 qid; u16 size; u32 sqhd; @@ -113,6 +159,7 @@ struct nvmet_sq { bool authenticated; struct delayed_work auth_expired_work; u16 dhchap_tid; + u8 sc_c; u8 dhchap_status; u8 dhchap_step; u8 *dhchap_c1; @@ -122,6 +169,9 @@ struct nvmet_sq { u8 *dhchap_skey; int dhchap_skey_len; #endif +#ifdef CONFIG_NVME_TARGET_TCP_TLS + struct key *tls_key; +#endif struct completion free_done; struct completion confirm_done; }; @@ -191,9 +241,19 @@ static inline bool nvmet_port_secure_channel_required(struct nvmet_port *port) return nvmet_port_disc_addr_treq_secure_channel(port) == NVMF_TREQ_REQUIRED; } +struct nvmet_pr_log_mgr { + struct mutex lock; + u64 lost_count; + u64 counter; + DECLARE_KFIFO(log_queue, struct nvme_pr_log, NVMET_PR_LOG_QUEUE_SIZE); +}; + struct nvmet_ctrl { struct nvmet_subsys *subsys; struct nvmet_sq **sqs; + struct nvmet_cq **cqs; + + void *drvdata; bool reset_tbkas; @@ -225,16 +285,18 @@ struct nvmet_ctrl { __le32 *changed_ns_list; u32 nr_changed_ns; - char subsysnqn[NVMF_NQN_FIELD_LEN]; char hostnqn[NVMF_NQN_FIELD_LEN]; struct device *p2p_client; struct radix_tree_root p2p_ns_map; - +#ifdef CONFIG_NVME_TARGET_DEBUGFS + struct dentry *debugfs_dir; +#endif spinlock_t error_lock; u64 err_counter; struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS]; bool pi_support; + bool concat; #ifdef CONFIG_NVME_TARGET_AUTH struct nvme_dhchap_key *host_key; struct nvme_dhchap_key *ctrl_key; @@ -244,6 +306,10 @@ struct nvmet_ctrl { u8 *dh_key; size_t dh_keysize; #endif +#ifdef CONFIG_NVME_TARGET_TCP_TLS + struct key *tls_key; +#endif + struct nvmet_pr_log_mgr pr_log_mgr; }; struct nvmet_subsys { @@ -262,7 +328,9 @@ struct nvmet_subsys { struct list_head hosts; bool allow_any_host; - +#ifdef CONFIG_NVME_TARGET_DEBUGFS + struct dentry *debugfs_dir; +#endif u16 max_qid; u64 ver; @@ -276,6 +344,8 @@ struct nvmet_subsys { struct config_group namespaces_group; struct config_group allowed_hosts_group; + u16 vendor_id; + u16 subsys_vendor_id; char *model_number; u32 ieee_oui; char *firmware_rev; @@ -350,10 +420,24 @@ struct nvmet_fabrics_ops { void (*delete_ctrl)(struct nvmet_ctrl 
*ctrl); void (*disc_traddr)(struct nvmet_req *req, struct nvmet_port *port, char *traddr); + ssize_t (*host_traddr)(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len); u16 (*install_queue)(struct nvmet_sq *nvme_sq); void (*discovery_chg)(struct nvmet_port *port); u8 (*get_mdts)(const struct nvmet_ctrl *ctrl); u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl); + + /* Operations mandatory for PCI target controllers */ + u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 cqid, u16 flags, + u16 qsize, u64 prp1); + u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid); + u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags, + u16 qsize, u64 prp1, u16 irq_vector); + u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid); + u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat, + void *feat_data); + u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat, + void *feat_data); }; #define NVMET_MAX_INLINE_BIOVEC 8 @@ -390,6 +474,9 @@ struct nvmet_req { struct work_struct zmgmt_work; } z; #endif /* CONFIG_BLK_DEV_ZONED */ + struct { + struct work_struct abort_work; + } r; }; int sg_cnt; int metadata_sg_cnt; @@ -406,6 +493,7 @@ struct nvmet_req { struct device *p2p_client; u16 error_loc; u64 error_slba; + struct nvmet_pr_per_ctrl_ref *pc_ref; }; #define NVMET_MAX_MPOOL_BVEC 16 @@ -459,18 +547,24 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl); void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl); u16 nvmet_parse_connect_cmd(struct nvmet_req *req); +u32 nvmet_connect_cmd_data_len(struct nvmet_req *req); void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id); u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req); u16 nvmet_file_parse_io_cmd(struct nvmet_req *req); u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req); +u32 nvmet_admin_cmd_data_len(struct nvmet_req *req); u16 nvmet_parse_admin_cmd(struct nvmet_req *req); +u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req); u16 nvmet_parse_discovery_cmd(struct nvmet_req *req); u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req); +u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req); u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req); +u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req); -bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, - struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); +bool nvmet_req_init(struct nvmet_req *req, struct nvmet_sq *sq, + const struct nvmet_fabrics_ops *ops); void nvmet_req_uninit(struct nvmet_req *req); +size_t nvmet_req_transfer_len(struct nvmet_req *req); bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len); bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len); void nvmet_req_complete(struct nvmet_req *req, u16 status); @@ -481,23 +575,51 @@ void nvmet_execute_set_features(struct nvmet_req *req); void nvmet_execute_get_features(struct nvmet_req *req); void nvmet_execute_keep_alive(struct nvmet_req *req); +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create); +u16 nvmet_check_io_cqid(struct nvmet_ctrl *ctrl, u16 cqid, bool create); +void nvmet_cq_init(struct nvmet_cq *cq); void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size); +u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, + u16 size); +void nvmet_cq_destroy(struct nvmet_cq *cq); +bool nvmet_cq_get(struct nvmet_cq *cq); +void nvmet_cq_put(struct nvmet_cq *cq); +bool nvmet_cq_in_use(struct nvmet_cq *cq); +u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 
sqid, bool create); void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size); +u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, + struct nvmet_cq *cq, u16 qid, u16 size); void nvmet_sq_destroy(struct nvmet_sq *sq); -int nvmet_sq_init(struct nvmet_sq *sq); +int nvmet_sq_init(struct nvmet_sq *sq, struct nvmet_cq *cq); void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl); void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new); -u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, - struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp); + +struct nvmet_alloc_ctrl_args { + struct nvmet_port *port; + struct nvmet_sq *sq; + char *subsysnqn; + char *hostnqn; + uuid_t *hostid; + const struct nvmet_fabrics_ops *ops; + struct device *p2p_client; + u32 kato; + __le32 result; + u16 error_loc; + u16 status; +}; + +struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args); struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid, struct nvmet_req *req); void nvmet_ctrl_put(struct nvmet_ctrl *ctrl); u16 nvmet_check_ctrl_status(struct nvmet_req *req); +ssize_t nvmet_ctrl_host_traddr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len); struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, enum nvme_subsys_type type); @@ -543,7 +665,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys, struct nvmet_host *host); void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, u8 event_info, u8 log_page); -bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid); #define NVMET_MIN_QUEUE_SIZE 16 #define NVMET_MAX_QUEUE_SIZE 1024 @@ -632,6 +753,11 @@ static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys) return subsys->type != NVME_NQN_NVME; } +static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl) +{ + return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI; +} + #ifdef CONFIG_NVME_TARGET_PASSTHRU void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys); int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys); @@ -673,10 +799,45 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl); u16 errno_to_nvme_status(struct nvmet_req *req, int errno); u16 nvmet_report_invalid_opcode(struct nvmet_req *req); +static inline bool nvmet_cc_en(u32 cc) +{ + return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT; +} + +static inline u8 nvmet_cc_css(u32 cc) +{ + return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT; +} + +static inline u8 nvmet_cc_mps(u32 cc) +{ + return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT; +} + +static inline u8 nvmet_cc_ams(u32 cc) +{ + return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT; +} + +static inline u8 nvmet_cc_shn(u32 cc) +{ + return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT; +} + +static inline u8 nvmet_cc_iosqes(u32 cc) +{ + return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT; +} + +static inline u8 nvmet_cc_iocqes(u32 cc) +{ + return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT; +} + /* Convert a 32-bit number to a 16-bit 0's based number */ static inline __le16 to0based(u32 a) { - return cpu_to_le16(max(1U, min(1U << 16, a)) - 1); + return cpu_to_le16(clamp(a, 1U, 1U << 16) - 1); } static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns) @@ -706,15 +867,35 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio) { if (bio != &req->b.inline_bio) bio_put(bio); + else + bio_uninit(bio); } +#ifdef CONFIG_NVME_TARGET_TCP_TLS +static inline key_serial_t 
nvmet_queue_tls_keyid(struct nvmet_sq *sq) +{ + return sq->tls_key ? key_serial(sq->tls_key) : 0; +} +static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) +{ + if (sq->tls_key) { + key_put(sq->tls_key); + sq->tls_key = NULL; + } +} +#else +static inline key_serial_t nvmet_queue_tls_keyid(struct nvmet_sq *sq) { return 0; } +static inline void nvmet_sq_put_tls_key(struct nvmet_sq *sq) {} +#endif #ifdef CONFIG_NVME_TARGET_AUTH +u32 nvmet_auth_send_data_len(struct nvmet_req *req); void nvmet_execute_auth_send(struct nvmet_req *req); +u32 nvmet_auth_receive_data_len(struct nvmet_req *req); void nvmet_execute_auth_receive(struct nvmet_req *req); int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, bool set_ctrl); int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash); -u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl); +u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq); void nvmet_auth_sq_init(struct nvmet_sq *sq); void nvmet_destroy_auth(struct nvmet_ctrl *ctrl); void nvmet_auth_sq_free(struct nvmet_sq *sq); @@ -724,16 +905,18 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response, unsigned int hash_len); int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response, unsigned int hash_len); -static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl) +static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq) { - return ctrl->host_key != NULL; + return ctrl->host_key != NULL && !nvmet_queue_tls_keyid(sq); } int nvmet_auth_ctrl_exponential(struct nvmet_req *req, u8 *buf, int buf_size); int nvmet_auth_ctrl_sesskey(struct nvmet_req *req, u8 *buf, int buf_size); +void nvmet_auth_insert_psk(struct nvmet_sq *sq); #else -static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl) +static inline u8 nvmet_setup_auth(struct nvmet_ctrl *ctrl, + struct nvmet_sq *sq) { return 0; } @@ -746,11 +929,49 @@ static inline bool nvmet_check_auth_status(struct nvmet_req *req) { return true; } -static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl) +static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl, + struct nvmet_sq *sq) { return false; } static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; } +static inline void nvmet_auth_insert_psk(struct nvmet_sq *sq) {}; #endif +int nvmet_pr_init_ns(struct nvmet_ns *ns); +u16 nvmet_parse_pr_cmd(struct nvmet_req *req); +u16 nvmet_pr_check_cmd_access(struct nvmet_req *req); +int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl); +void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl); +void nvmet_pr_exit_ns(struct nvmet_ns *ns); +void nvmet_execute_get_log_page_resv(struct nvmet_req *req); +u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask); +u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req); +u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req); +static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref) +{ + percpu_ref_put(&pc_ref->ref); +} + +/* + * Data for the get_feature() and set_feature() operations of PCI target + * controllers. 
+ */ +struct nvmet_feat_irq_coalesce { + u8 thr; + u8 time; +}; + +struct nvmet_feat_irq_config { + u16 iv; + bool cd; +}; + +struct nvmet_feat_arbitration { + u8 hpw; + u8 mpw; + u8 lpw; + u8 ab; +}; + #endif /* _NVMET_H */ diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index f003782d4ecf..96648ec2fadb 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -13,7 +13,7 @@ #include "../host/nvme.h" #include "nvmet.h" -MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU); +MODULE_IMPORT_NS("NVME_TARGET_PASSTHRU"); /* * xarray to maintain one passthru subsystem per nvme controller. @@ -99,14 +99,14 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) /* * The passthru NVMe driver may have a limit on the number of segments - * which depends on the host's memory fragementation. To solve this, + * which depends on the host's memory fragmentation. To solve this, * ensure mdts is limited to the pages equal to the number of segments. */ max_hw_sectors = min_not_zero(pctrl->max_segments << PAGE_SECTORS_SHIFT, pctrl->max_hw_sectors); /* - * nvmet_passthru_map_sg is limitted to using a single bio so limit + * nvmet_passthru_map_sg is limited to using a single bio so limit * the mdts based on BIO_MAX_VECS as well */ max_hw_sectors = min_not_zero(BIO_MAX_VECS << PAGE_SECTORS_SHIFT, @@ -147,10 +147,10 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req) * When passthru controller is setup using nvme-loop transport it will * export the passthru ctrl subsysnqn (PCIe NVMe ctrl) and will fail in * the nvme/host/core.c in the nvme_init_subsystem()->nvme_active_ctrl() - * code path with duplicate ctr subsynqn. In order to prevent that we + * code path with duplicate ctrl subsysnqn. In order to prevent that we * mask the passthru-ctrl subsysnqn with the target ctrl subsysnqn. */ - memcpy(id->subnqn, ctrl->subsysnqn, sizeof(id->subnqn)); + memcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn)); /* use fabric id-ctrl values */ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + @@ -261,6 +261,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) { struct scatterlist *sg; struct bio *bio; + int ret = -EINVAL; int i; if (req->sg_cnt > BIO_MAX_VECS) @@ -277,16 +278,19 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq) } for_each_sg(req->sg, sg, req->sg_cnt, i) { - if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length, - sg->offset) < sg->length) { - nvmet_req_bio_put(req, bio); - return -EINVAL; - } + if (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) < + sg->length) + goto out_bio_put; } - blk_rq_bio_prep(rq, bio, req->sg_cnt); - + ret = blk_rq_append_bio(rq, bio); + if (ret) + goto out_bio_put; return 0; + +out_bio_put: + nvmet_req_bio_put(req, bio); + return ret; } static void nvmet_passthru_execute_cmd(struct nvmet_req *req) @@ -306,7 +310,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req) ns = nvme_find_get_ns(ctrl, nsid); if (unlikely(!ns)) { pr_err("failed to get passthru ns nsid:%u\n", nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -426,7 +430,7 @@ u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req) * emulated in the future if regular targets grow support for * this feature. 
*/ - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } return nvmet_setup_passthru_command(req); @@ -478,7 +482,7 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req) case NVME_FEAT_RESV_PERSIST: /* No reservations, see nvmet_parse_passthru_io_cmd() */ default: - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; } } @@ -529,16 +533,14 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) case NVME_FEAT_HOST_ID: req->execute = nvmet_execute_get_features; return NVME_SC_SUCCESS; + case NVME_FEAT_FDP: + return nvmet_setup_passthru_command(req); default: return nvmet_passthru_get_set_features(req); } break; case nvme_admin_identify: switch (req->cmd->identify.cns) { - case NVME_ID_CNS_CTRL: - req->execute = nvmet_passthru_execute_cmd; - req->p.use_workqueue = true; - return NVME_SC_SUCCESS; case NVME_ID_CNS_CS_CTRL: switch (req->cmd->identify.csi) { case NVME_CSI_ZNS: @@ -546,8 +548,10 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + case NVME_ID_CNS_CTRL: case NVME_ID_CNS_NS: + case NVME_ID_CNS_NS_DESC_LIST: req->execute = nvmet_passthru_execute_cmd; req->p.use_workqueue = true; return NVME_SC_SUCCESS; @@ -558,7 +562,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req) req->p.use_workqueue = true; return NVME_SC_SUCCESS; } - return NVME_SC_INVALID_OPCODE | NVME_SC_DNR; + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; default: return nvmet_setup_passthru_command(req); } diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c new file mode 100644 index 000000000000..f858a6c9d7cb --- /dev/null +++ b/drivers/nvme/target/pci-epf.c @@ -0,0 +1,2651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe PCI Endpoint Function target driver. + * + * Copyright (c) 2024, Western Digital Corporation or its affiliates. + * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com> + * REDS Institute, HEIG-VD, HES-SO, Switzerland + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/io.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvme.h> +#include <linux/pci_ids.h> +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci_regs.h> +#include <linux/slab.h> + +#include "nvmet.h" + +static LIST_HEAD(nvmet_pci_epf_ports); +static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex); + +/* + * Default and maximum allowed data transfer size. For the default, + * allow up to 128 page-sized segments. For the maximum allowed, + * use 4 times the default (which is completely arbitrary). + */ +#define NVMET_PCI_EPF_MAX_SEGS 128 +#define NVMET_PCI_EPF_MDTS_KB \ + (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10)) +#define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4) + +/* + * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an + * interrupt vector to the host. This default 8 is completely arbitrary and can + * be changed by the host with a nvme_set_features command. + */ +#define NVMET_PCI_EPF_IV_THRESHOLD 8 + +/* + * BAR CC register and SQ polling intervals. 
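 * Editor's aside (not part of this patch): the CC poll work reads the
 * controller configuration from the register BAR and decodes it with the
 * nvmet_cc_*() helpers added to nvmet.h earlier in this diff. A hedged
 * sketch of the decode step only (the driver's real enable path does much
 * more than this):
 *
 *	u32 cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
 *
 *	if (nvmet_cc_en(cc)) {
 *		ctrl->mps_shift = nvmet_cc_mps(cc) + 12;
 *		ctrl->mps = 1UL << ctrl->mps_shift;
 *		ctrl->mps_mask = ctrl->mps - 1;
 *	}
 *
 * where the memory page size is 2^(12 + CC.MPS) bytes, so CC.MPS = 0 gives
 * the usual 4 KiB pages assumed in the PRP handling further down.
 *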
+ */ +#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10) +#define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5) +#define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000) + +/* + * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ. + */ +#define NVMET_PCI_EPF_SQ_AB 8 + +/* + * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ + * is full, in which case we retry the CQ processing after this interval. + */ +#define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1) + +enum nvmet_pci_epf_queue_flags { + NVMET_PCI_EPF_Q_LIVE = 0, /* The queue is live */ + NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */ +}; + +/* + * IRQ vector descriptor. + */ +struct nvmet_pci_epf_irq_vector { + unsigned int vector; + unsigned int ref; + bool cd; + int nr_irqs; +}; + +struct nvmet_pci_epf_queue { + union { + struct nvmet_sq nvme_sq; + struct nvmet_cq nvme_cq; + }; + struct nvmet_pci_epf_ctrl *ctrl; + unsigned long flags; + + u64 pci_addr; + size_t pci_size; + struct pci_epc_map pci_map; + + u16 qid; + u16 depth; + u16 vector; + u16 head; + u16 tail; + u16 phase; + u32 db; + + size_t qes; + + struct nvmet_pci_epf_irq_vector *iv; + struct workqueue_struct *iod_wq; + struct delayed_work work; + spinlock_t lock; + struct list_head list; +}; + +/* + * PCI Root Complex (RC) address data segment for mapping an admin or + * I/O command buffer @buf of @length bytes to the PCI address @pci_addr. + */ +struct nvmet_pci_epf_segment { + void *buf; + u64 pci_addr; + u32 length; +}; + +/* + * Command descriptors. + */ +struct nvmet_pci_epf_iod { + struct list_head link; + + struct nvmet_req req; + struct nvme_command cmd; + struct nvme_completion cqe; + unsigned int status; + + struct nvmet_pci_epf_ctrl *ctrl; + + struct nvmet_pci_epf_queue *sq; + struct nvmet_pci_epf_queue *cq; + + /* Data transfer size and direction for the command. */ + size_t data_len; + enum dma_data_direction dma_dir; + + /* + * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we + * use only @data_seg. Otherwise, the array of segments @data_segs is + * allocated to manage multiple PCI address data segments. @data_sgl and + * @data_sgt are used to setup the command request for execution by the + * target core. + */ + unsigned int nr_data_segs; + struct nvmet_pci_epf_segment data_seg; + struct nvmet_pci_epf_segment *data_segs; + struct scatterlist data_sgl; + struct sg_table data_sgt; + + struct work_struct work; + struct completion done; +}; + +/* + * PCI target controller private data. + */ +struct nvmet_pci_epf_ctrl { + struct nvmet_pci_epf *nvme_epf; + struct nvmet_port *port; + struct nvmet_ctrl *tctrl; + struct device *dev; + + unsigned int nr_queues; + struct nvmet_pci_epf_queue *sq; + struct nvmet_pci_epf_queue *cq; + unsigned int sq_ab; + + mempool_t iod_pool; + void *bar; + u64 cap; + u32 cc; + u32 csts; + + size_t io_sqes; + size_t io_cqes; + + size_t mps_shift; + size_t mps; + size_t mps_mask; + + unsigned int mdts; + + struct delayed_work poll_cc; + struct delayed_work poll_sqs; + + struct mutex irq_lock; + struct nvmet_pci_epf_irq_vector *irq_vectors; + unsigned int irq_vector_threshold; + + bool link_up; + bool enabled; +}; + +/* + * PCI EPF driver private data. 
+ */ +struct nvmet_pci_epf { + struct pci_epf *epf; + + const struct pci_epc_features *epc_features; + + void *reg_bar; + size_t msix_table_offset; + + unsigned int irq_type; + unsigned int nr_vectors; + + struct nvmet_pci_epf_ctrl ctrl; + + bool dma_enabled; + struct dma_chan *dma_tx_chan; + struct mutex dma_tx_lock; + struct dma_chan *dma_rx_chan; + struct mutex dma_rx_lock; + + struct mutex mmio_lock; + + /* PCI endpoint function configfs attributes. */ + struct config_group group; + __le16 portid; + char subsysnqn[NVMF_NQN_SIZE]; + unsigned int mdts_kb; +}; + +static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl, + u32 off) +{ + __le32 *bar_reg = ctrl->bar + off; + + return le32_to_cpu(READ_ONCE(*bar_reg)); +} + +static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl, + u32 off, u32 val) +{ + __le32 *bar_reg = ctrl->bar + off; + + WRITE_ONCE(*bar_reg, cpu_to_le32(val)); +} + +static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl, + u32 off) +{ + return (u64)nvmet_pci_epf_bar_read32(ctrl, off) | + ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32); +} + +static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl, + u32 off, u64 val) +{ + nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF); + nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF); +} + +static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf, + u64 pci_addr, size_t size, struct pci_epc_map *map) +{ + struct pci_epf *epf = nvme_epf->epf; + + return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, + pci_addr, size, map); +} + +static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf, + struct pci_epc_map *map) +{ + struct pci_epf *epf = nvme_epf->epf; + + pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map); +} + +struct nvmet_pci_epf_dma_filter { + struct device *dev; + u32 dma_mask; +}; + +static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg) +{ + struct nvmet_pci_epf_dma_filter *filter = arg; + struct dma_slave_caps caps; + + memset(&caps, 0, sizeof(caps)); + dma_get_slave_caps(chan, &caps); + + return chan->device->dev == filter->dev && + (filter->dma_mask & caps.directions); +} + +static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + struct device *dev = &epf->dev; + struct nvmet_pci_epf_dma_filter filter; + struct dma_chan *chan; + dma_cap_mask_t mask; + + mutex_init(&nvme_epf->dma_rx_lock); + mutex_init(&nvme_epf->dma_tx_lock); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + filter.dev = epf->epc->dev.parent; + filter.dma_mask = BIT(DMA_DEV_TO_MEM); + + chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); + if (!chan) + goto out_dma_no_rx; + + nvme_epf->dma_rx_chan = chan; + + filter.dma_mask = BIT(DMA_MEM_TO_DEV); + chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); + if (!chan) + goto out_dma_no_tx; + + nvme_epf->dma_tx_chan = chan; + + nvme_epf->dma_enabled = true; + + dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", + dma_chan_name(nvme_epf->dma_rx_chan), + dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf-> + dma_rx_chan))); + + dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", + dma_chan_name(nvme_epf->dma_tx_chan), + dma_get_max_seg_size(dmaengine_get_dma_device(nvme_epf-> + dma_tx_chan))); + + return; + +out_dma_no_tx: + dma_release_channel(nvme_epf->dma_rx_chan); + nvme_epf->dma_rx_chan = NULL; + 
+out_dma_no_rx: + mutex_destroy(&nvme_epf->dma_rx_lock); + mutex_destroy(&nvme_epf->dma_tx_lock); + nvme_epf->dma_enabled = false; + + dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n"); +} + +static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf) +{ + if (!nvme_epf->dma_enabled) + return; + + dma_release_channel(nvme_epf->dma_tx_chan); + nvme_epf->dma_tx_chan = NULL; + dma_release_channel(nvme_epf->dma_rx_chan); + nvme_epf->dma_rx_chan = NULL; + mutex_destroy(&nvme_epf->dma_rx_lock); + mutex_destroy(&nvme_epf->dma_tx_lock); + nvme_epf->dma_enabled = false; +} + +static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf, + struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) +{ + struct pci_epf *epf = nvme_epf->epf; + struct dma_async_tx_descriptor *desc; + struct dma_slave_config sconf = {}; + struct device *dev = &epf->dev; + struct device *dma_dev; + struct dma_chan *chan; + dma_cookie_t cookie; + dma_addr_t dma_addr; + struct mutex *lock; + int ret; + + switch (dir) { + case DMA_FROM_DEVICE: + lock = &nvme_epf->dma_rx_lock; + chan = nvme_epf->dma_rx_chan; + sconf.direction = DMA_DEV_TO_MEM; + sconf.src_addr = seg->pci_addr; + break; + case DMA_TO_DEVICE: + lock = &nvme_epf->dma_tx_lock; + chan = nvme_epf->dma_tx_chan; + sconf.direction = DMA_MEM_TO_DEV; + sconf.dst_addr = seg->pci_addr; + break; + default: + return -EINVAL; + } + + mutex_lock(lock); + + dma_dev = dmaengine_get_dma_device(chan); + dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir); + ret = dma_mapping_error(dma_dev, dma_addr); + if (ret) + goto unlock; + + ret = dmaengine_slave_config(chan, &sconf); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto unmap; + } + + desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length, + sconf.direction, DMA_CTRL_ACK); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto unmap; + } + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret); + goto unmap; + } + + if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) { + dev_err(dev, "DMA transfer failed\n"); + ret = -EIO; + } + + dmaengine_terminate_sync(chan); + +unmap: + dma_unmap_single(dma_dev, dma_addr, seg->length, dir); + +unlock: + mutex_unlock(lock); + + return ret; +} + +static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf, + struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) +{ + u64 pci_addr = seg->pci_addr; + u32 length = seg->length; + void *buf = seg->buf; + struct pci_epc_map map; + int ret = -EINVAL; + + /* + * Note: MMIO transfers do not need serialization but this is a + * simple way to avoid using too many mapping windows. 
+ */ + mutex_lock(&nvme_epf->mmio_lock); + + while (length) { + ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map); + if (ret) + break; + + switch (dir) { + case DMA_FROM_DEVICE: + memcpy_fromio(buf, map.virt_addr, map.pci_size); + break; + case DMA_TO_DEVICE: + memcpy_toio(map.virt_addr, buf, map.pci_size); + break; + default: + ret = -EINVAL; + goto unlock; + } + + pci_addr += map.pci_size; + buf += map.pci_size; + length -= map.pci_size; + + nvmet_pci_epf_mem_unmap(nvme_epf, &map); + } + +unlock: + mutex_unlock(&nvme_epf->mmio_lock); + + return ret; +} + +static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf, + struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) +{ + if (nvme_epf->dma_enabled) + return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir); + + return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir); +} + +static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl, + void *buf, u64 pci_addr, u32 length, + enum dma_data_direction dir) +{ + struct nvmet_pci_epf_segment seg = { + .buf = buf, + .pci_addr = pci_addr, + .length = length, + }; + + return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir); +} + +static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) +{ + ctrl->irq_vectors = kcalloc(ctrl->nr_queues, + sizeof(struct nvmet_pci_epf_irq_vector), + GFP_KERNEL); + if (!ctrl->irq_vectors) + return -ENOMEM; + + mutex_init(&ctrl->irq_lock); + + return 0; +} + +static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) +{ + if (ctrl->irq_vectors) { + mutex_destroy(&ctrl->irq_lock); + kfree(ctrl->irq_vectors); + ctrl->irq_vectors = NULL; + } +} + +static struct nvmet_pci_epf_irq_vector * +nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) +{ + struct nvmet_pci_epf_irq_vector *iv; + int i; + + lockdep_assert_held(&ctrl->irq_lock); + + for (i = 0; i < ctrl->nr_queues; i++) { + iv = &ctrl->irq_vectors[i]; + if (iv->ref && iv->vector == vector) + return iv; + } + + return NULL; +} + +static struct nvmet_pci_epf_irq_vector * +nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) +{ + struct nvmet_pci_epf_irq_vector *iv; + int i; + + mutex_lock(&ctrl->irq_lock); + + iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); + if (iv) { + iv->ref++; + goto unlock; + } + + for (i = 0; i < ctrl->nr_queues; i++) { + iv = &ctrl->irq_vectors[i]; + if (!iv->ref) + break; + } + + if (WARN_ON_ONCE(!iv)) + goto unlock; + + iv->ref = 1; + iv->vector = vector; + iv->nr_irqs = 0; + +unlock: + mutex_unlock(&ctrl->irq_lock); + + return iv; +} + +static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, + u16 vector) +{ + struct nvmet_pci_epf_irq_vector *iv; + + mutex_lock(&ctrl->irq_lock); + + iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); + if (iv) { + iv->ref--; + if (!iv->ref) { + iv->vector = 0; + iv->nr_irqs = 0; + } + } + + mutex_unlock(&ctrl->irq_lock); +} + +static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *cq, bool force) +{ + struct nvmet_pci_epf_irq_vector *iv = cq->iv; + bool ret; + + /* IRQ coalescing for the admin queue is not allowed. 
*/ + if (!cq->qid) + return true; + + if (iv->cd) + return true; + + if (force) { + ret = iv->nr_irqs > 0; + } else { + iv->nr_irqs++; + ret = iv->nr_irqs >= ctrl->irq_vector_threshold; + } + if (ret) + iv->nr_irqs = 0; + + return ret; +} + +static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *cq, bool force) +{ + struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; + struct pci_epf *epf = nvme_epf->epf; + int ret = 0; + + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) || + !test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + return; + + mutex_lock(&ctrl->irq_lock); + + if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force)) + goto unlock; + + switch (nvme_epf->irq_type) { + case PCI_IRQ_MSIX: + case PCI_IRQ_MSI: + /* + * If we fail to raise an MSI or MSI-X interrupt, it is likely + * because the host is using legacy INTX IRQs (e.g. BIOS, + * grub), but we can fallback to the INTX type only if the + * endpoint controller supports this type. + */ + ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, + nvme_epf->irq_type, cq->vector + 1); + if (!ret || !nvme_epf->epc_features->intx_capable) + break; + fallthrough; + case PCI_IRQ_INTX: + ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, + PCI_IRQ_INTX, 0); + break; + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + break; + } + + if (ret) + dev_err_ratelimited(ctrl->dev, + "CQ[%u]: Failed to raise IRQ (err=%d)\n", + cq->qid, ret); + +unlock: + mutex_unlock(&ctrl->irq_lock); +} + +static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod) +{ + return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode); +} + +static void nvmet_pci_epf_exec_iod_work(struct work_struct *work); + +static struct nvmet_pci_epf_iod * +nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq) +{ + struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl; + struct nvmet_pci_epf_iod *iod; + + iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL); + if (unlikely(!iod)) + return NULL; + + memset(iod, 0, sizeof(*iod)); + iod->req.cmd = &iod->cmd; + iod->req.cqe = &iod->cqe; + iod->req.port = ctrl->port; + iod->ctrl = ctrl; + iod->sq = sq; + iod->cq = &ctrl->cq[sq->qid]; + INIT_LIST_HEAD(&iod->link); + iod->dma_dir = DMA_NONE; + INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work); + init_completion(&iod->done); + + return iod; +} + +/* + * Allocate or grow a command table of PCI segments. + */ +static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod, + int nsegs) +{ + struct nvmet_pci_epf_segment *segs; + int nr_segs = iod->nr_data_segs + nsegs; + + segs = krealloc(iod->data_segs, + nr_segs * sizeof(struct nvmet_pci_epf_segment), + GFP_KERNEL | __GFP_ZERO); + if (!segs) + return -ENOMEM; + + iod->nr_data_segs = nr_segs; + iod->data_segs = segs; + + return 0; +} + +static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod) +{ + int i; + + if (iod->data_segs) { + for (i = 0; i < iod->nr_data_segs; i++) + kfree(iod->data_segs[i].buf); + if (iod->data_segs != &iod->data_seg) + kfree(iod->data_segs); + } + if (iod->data_sgt.nents > 1) + sg_free_table(&iod->data_sgt); + mempool_free(iod, &iod->ctrl->iod_pool); +} + +static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf; + struct nvmet_pci_epf_segment *seg = &iod->data_segs[0]; + int i, ret; + + /* Split the data transfer according to the PCI segments. 
*/ + for (i = 0; i < iod->nr_data_segs; i++, seg++) { + ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir); + if (ret) { + iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR; + return ret; + } + } + + return 0; +} + +static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl, + u64 prp) +{ + return prp & ctrl->mps_mask; +} + +static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl, + u64 prp) +{ + return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp); +} + +/* + * Transfer a PRP list from the host and return the number of prps. + */ +static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp, + size_t xfer_len, __le64 *prps) +{ + size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift; + u32 length; + int ret; + + /* + * Compute the number of PRPs required for the number of bytes to + * transfer (xfer_len). If this number overflows the memory page size + * with the PRP list pointer specified, only return the space available + * in the memory page, the last PRP in there will be a PRP list pointer + * to the remaining PRPs. + */ + length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3); + ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE); + if (ret) + return ret; + + return length >> 3; +} + +static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_iod *iod) +{ + struct nvme_command *cmd = &iod->cmd; + struct nvmet_pci_epf_segment *seg; + size_t size = 0, ofst, prp_size, xfer_len; + size_t transfer_len = iod->data_len; + int nr_segs, nr_prps = 0; + u64 pci_addr, prp; + int i = 0, ret; + __le64 *prps; + + prps = kzalloc(ctrl->mps, GFP_KERNEL); + if (!prps) + goto err_internal; + + /* + * Allocate PCI segments for the command: this considers the worst case + * scenario where all prps are discontiguous, so get as many segments + * as we can have prps. In practice, most of the time, we will have + * far less PCI segments than prps. + */ + prp = le64_to_cpu(cmd->common.dptr.prp1); + if (!prp) + goto err_invalid_field; + + ofst = nvmet_pci_epf_prp_ofst(ctrl, prp); + nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift; + + ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); + if (ret) + goto err_internal; + + /* Set the first segment using prp1. */ + seg = &iod->data_segs[0]; + seg->pci_addr = prp; + seg->length = nvmet_pci_epf_prp_size(ctrl, prp); + + size = seg->length; + pci_addr = prp + size; + nr_segs = 1; + + /* + * Now build the PCI address segments using the PRP lists, starting + * from prp2. + */ + prp = le64_to_cpu(cmd->common.dptr.prp2); + if (!prp) + goto err_invalid_field; + + while (size < transfer_len) { + xfer_len = transfer_len - size; + + if (!nr_prps) { + nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp, + xfer_len, prps); + if (nr_prps < 0) + goto err_internal; + + i = 0; + ofst = 0; + } + + /* Current entry */ + prp = le64_to_cpu(prps[i]); + if (!prp) + goto err_invalid_field; + + /* Did we reach the last PRP entry of the list? */ + if (xfer_len > ctrl->mps && i == nr_prps - 1) { + /* We need more PRPs: PRP is a list pointer. */ + nr_prps = 0; + continue; + } + + /* Only the first PRP is allowed to have an offset. */ + if (nvmet_pci_epf_prp_ofst(ctrl, prp)) + goto err_invalid_offset; + + if (prp != pci_addr) { + /* Discontiguous prp: new segment. 
*/ + nr_segs++; + if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs)) + goto err_internal; + + seg++; + seg->pci_addr = prp; + seg->length = 0; + pci_addr = prp; + } + + prp_size = min_t(size_t, ctrl->mps, xfer_len); + seg->length += prp_size; + pci_addr += prp_size; + size += prp_size; + + i++; + } + + iod->nr_data_segs = nr_segs; + ret = 0; + + if (size != transfer_len) { + dev_err(ctrl->dev, + "PRPs transfer length mismatch: got %zu B, need %zu B\n", + size, transfer_len); + goto err_internal; + } + + kfree(prps); + + return 0; + +err_invalid_offset: + dev_err(ctrl->dev, "PRPs list invalid offset\n"); + iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; + goto err; + +err_invalid_field: + dev_err(ctrl->dev, "PRPs list invalid field\n"); + iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto err; + +err_internal: + dev_err(ctrl->dev, "PRPs list internal error\n"); + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + +err: + kfree(prps); + return -EINVAL; +} + +static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_iod *iod) +{ + struct nvme_command *cmd = &iod->cmd; + size_t transfer_len = iod->data_len; + int ret, nr_segs = 1; + u64 prp1, prp2 = 0; + size_t prp1_size; + + prp1 = le64_to_cpu(cmd->common.dptr.prp1); + prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1); + + /* For commands crossing a page boundary, we should have prp2. */ + if (transfer_len > prp1_size) { + prp2 = le64_to_cpu(cmd->common.dptr.prp2); + if (!prp2) { + iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + return -EINVAL; + } + if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) { + iod->status = + NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; + return -EINVAL; + } + if (prp2 != prp1 + prp1_size) + nr_segs = 2; + } + + if (nr_segs == 1) { + iod->nr_data_segs = 1; + iod->data_segs = &iod->data_seg; + iod->data_segs[0].pci_addr = prp1; + iod->data_segs[0].length = transfer_len; + return 0; + } + + ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); + if (ret) { + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + return ret; + } + + iod->data_segs[0].pci_addr = prp1; + iod->data_segs[0].length = prp1_size; + iod->data_segs[1].pci_addr = prp2; + iod->data_segs[1].length = transfer_len - prp1_size; + + return 0; +} + +static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; + u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1); + size_t ofst; + + /* Get the PCI address segments for the command using its PRPs. */ + ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1); + if (ofst & 0x3) { + iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; + return -EINVAL; + } + + if (iod->data_len + ofst <= ctrl->mps * 2) + return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod); + + return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod); +} + +/* + * Transfer an SGL segment from the host and return the number of data + * descriptors and the next segment descriptor, if any. 
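 * Editor's aside (not part of this patch): each SGL segment fetched here is
 * an array of 16 byte nvme_sgl_desc entries, so for example a 256 byte
 * segment holds length / sizeof(struct nvme_sgl_desc) = 16 descriptors. If
 * the last one is an SGL Segment or Last Segment descriptor it is not data
 * but the pointer to the next segment, which is why the function below
 * returns it through *desc and reports only the remaining entries as data
 * descriptors.
 *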
+ */ +static struct nvme_sgl_desc * +nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl, + struct nvme_sgl_desc *desc, unsigned int *nr_sgls) +{ + struct nvme_sgl_desc *sgls; + u32 length = le32_to_cpu(desc->length); + int nr_descs, ret; + void *buf; + + buf = kmalloc(length, GFP_KERNEL); + if (!buf) + return NULL; + + ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length, + DMA_FROM_DEVICE); + if (ret) { + kfree(buf); + return NULL; + } + + sgls = buf; + nr_descs = length / sizeof(struct nvme_sgl_desc); + if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) || + sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { + /* + * We have another SGL segment following this one: do not count + * it as a regular data SGL descriptor and return it to the + * caller. + */ + *desc = sgls[nr_descs - 1]; + nr_descs--; + } else { + /* We do not have another SGL segment after this one. */ + desc->length = 0; + } + + *nr_sgls = nr_descs; + + return sgls; +} + +static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_iod *iod) +{ + struct nvme_command *cmd = &iod->cmd; + struct nvme_sgl_desc seg = cmd->common.dptr.sgl; + struct nvme_sgl_desc *sgls = NULL; + int n = 0, i, nr_sgls; + int ret; + + /* + * We do not support inline data nor keyed SGLs, so we should be seeing + * only segment descriptors. + */ + if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) && + seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { + iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; + return -EIO; + } + + while (seg.length) { + sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls); + if (!sgls) { + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + return -EIO; + } + + /* Grow the PCI segment table as needed. */ + ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls); + if (ret) { + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + goto out; + } + + /* + * Parse the SGL descriptors to build the PCI segment table, + * checking the descriptor type as we go. + */ + for (i = 0; i < nr_sgls; i++) { + if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) { + iod->status = NVME_SC_SGL_INVALID_TYPE | + NVME_STATUS_DNR; + goto out; + } + iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr); + iod->data_segs[n].length = le32_to_cpu(sgls[i].length); + n++; + } + + kfree(sgls); + } + + out: + if (iod->status != NVME_SC_SUCCESS) { + kfree(sgls); + return -EIO; + } + + return 0; +} + +static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; + struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl; + + if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) { + /* Single data descriptor case. */ + iod->nr_data_segs = 1; + iod->data_segs = &iod->data_seg; + iod->data_seg.pci_addr = le64_to_cpu(sgl->addr); + iod->data_seg.length = le32_to_cpu(sgl->length); + return 0; + } + + return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod); +} + +static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; + struct nvmet_req *req = &iod->req; + struct nvmet_pci_epf_segment *seg; + struct scatterlist *sg; + int ret, i; + + if (iod->data_len > ctrl->mdts) { + iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + return -EINVAL; + } + + /* + * Get the PCI address segments for the command data buffer using either + * its SGLs or PRPs. 
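 * Editor's aside (not part of this patch): a worked PRP example, assuming
 * CC.MPS selects 4 KiB pages (ctrl->mps = 4096). For a 12 KiB transfer with
 * PRP1 = 0x10000200, the PRP1 offset is 0x200, so data_len + ofst =
 * 12288 + 512 exceeds 2 * 4096 and nvmet_pci_epf_iod_parse_prp_list() above
 * is used: PRP1 contributes 4096 - 512 = 3584 bytes and the remaining 8704
 * bytes are covered by three page aligned entries read from the list at
 * PRP2 (4096 + 4096 + 512 bytes). Entries contiguous with the previous one
 * extend the current PCI segment, while a discontiguous entry starts a new
 * segment, which is what bounds the nr_data_segs worst case computed above.
 *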
+ */ + if (iod->cmd.common.flags & NVME_CMD_SGL_ALL) + ret = nvmet_pci_epf_iod_parse_sgls(iod); + else + ret = nvmet_pci_epf_iod_parse_prps(iod); + if (ret) + return ret; + + /* Get a command buffer using SGLs matching the PCI segments. */ + if (iod->nr_data_segs == 1) { + sg_init_table(&iod->data_sgl, 1); + iod->data_sgt.sgl = &iod->data_sgl; + iod->data_sgt.nents = 1; + iod->data_sgt.orig_nents = 1; + } else { + ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs, + GFP_KERNEL); + if (ret) + goto err_nomem; + } + + for_each_sgtable_sg(&iod->data_sgt, sg, i) { + seg = &iod->data_segs[i]; + seg->buf = kmalloc(seg->length, GFP_KERNEL); + if (!seg->buf) + goto err_nomem; + sg_set_buf(sg, seg->buf, seg->length); + } + + req->transfer_len = iod->data_len; + req->sg = iod->data_sgt.sgl; + req->sg_cnt = iod->data_sgt.nents; + + return 0; + +err_nomem: + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + return -ENOMEM; +} + +static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_queue *cq = iod->cq; + unsigned long flags; + + /* Print an error message for failed commands, except AENs. */ + iod->status = le16_to_cpu(iod->cqe.status) >> 1; + if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event) + dev_err(iod->ctrl->dev, + "CQ[%d]: Command %s (0x%x) status 0x%0x\n", + iod->sq->qid, nvmet_pci_epf_iod_name(iod), + iod->cmd.common.opcode, iod->status); + + /* + * Add the command to the list of completed commands and schedule the + * CQ work. + */ + spin_lock_irqsave(&cq->lock, flags); + list_add_tail(&iod->link, &cq->list); + queue_delayed_work(system_highpri_wq, &cq->work, 0); + spin_unlock_irqrestore(&cq->lock, flags); +} + +static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue) +{ + struct nvmet_pci_epf_iod *iod; + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + while (!list_empty(&queue->list)) { + iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod, + link); + list_del_init(&iod->link); + nvmet_pci_epf_free_iod(iod); + } + spin_unlock_irqrestore(&queue->lock, flags); +} + +static int nvmet_pci_epf_add_port(struct nvmet_port *port) +{ + mutex_lock(&nvmet_pci_epf_ports_mutex); + list_add_tail(&port->entry, &nvmet_pci_epf_ports); + mutex_unlock(&nvmet_pci_epf_ports_mutex); + return 0; +} + +static void nvmet_pci_epf_remove_port(struct nvmet_port *port) +{ + mutex_lock(&nvmet_pci_epf_ports_mutex); + list_del_init(&port->entry); + mutex_unlock(&nvmet_pci_epf_ports_mutex); +} + +static struct nvmet_port * +nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid) +{ + struct nvmet_port *p, *port = NULL; + + mutex_lock(&nvmet_pci_epf_ports_mutex); + list_for_each_entry(p, &nvmet_pci_epf_ports, entry) { + if (p->disc_addr.portid == portid) { + port = p; + break; + } + } + mutex_unlock(&nvmet_pci_epf_ports_mutex); + + return port; +} + +static void nvmet_pci_epf_queue_response(struct nvmet_req *req) +{ + struct nvmet_pci_epf_iod *iod = + container_of(req, struct nvmet_pci_epf_iod, req); + + iod->status = le16_to_cpu(req->cqe->status) >> 1; + + /* + * If the command failed or we have no data to transfer, complete the + * command immediately. 
+ */ + if (iod->status || !iod->data_len || iod->dma_dir != DMA_TO_DEVICE) { + nvmet_pci_epf_complete_iod(iod); + return; + } + + complete(&iod->done); +} + +static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12; + + return ilog2(ctrl->mdts) - page_shift; +} + +static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl, + u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; + u16 status; + int ret; + + if (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + if (!(flags & NVME_QUEUE_PHYS_CONTIG)) + return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; + + cq->pci_addr = pci_addr; + cq->qid = cqid; + cq->depth = qsize + 1; + cq->vector = vector; + cq->head = 0; + cq->tail = 0; + cq->phase = 1; + cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32)); + nvmet_pci_epf_bar_write32(ctrl, cq->db, 0); + + if (!cqid) + cq->qes = sizeof(struct nvme_completion); + else + cq->qes = ctrl->io_cqes; + cq->pci_size = cq->qes * cq->depth; + + if (flags & NVME_CQ_IRQ_ENABLED) { + cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); + if (!cq->iv) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); + } + + status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth); + if (status != NVME_SC_SUCCESS) + goto err; + + /* + * Map the CQ PCI address space and since PCI endpoint controllers may + * return a partial mapping, check that the mapping is large enough. + */ + ret = nvmet_pci_epf_mem_map(ctrl->nvme_epf, cq->pci_addr, cq->pci_size, + &cq->pci_map); + if (ret) { + dev_err(ctrl->dev, "Failed to map CQ %u (err=%d)\n", + cq->qid, ret); + goto err_internal; + } + + if (cq->pci_map.pci_size < cq->pci_size) { + dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n", + cq->qid); + goto err_unmap_queue; + } + + set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); + + if (test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + dev_dbg(ctrl->dev, + "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", + cqid, qsize, cq->qes, cq->vector); + else + dev_dbg(ctrl->dev, + "CQ[%u]: %u entries of %zu B, IRQ disabled\n", + cqid, qsize, cq->qes); + + return NVME_SC_SUCCESS; + +err_unmap_queue: + nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); +err_internal: + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; +err: + if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); + return status; +} + +static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; + + if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + cancel_delayed_work_sync(&cq->work); + nvmet_pci_epf_drain_queue(cq); + if (test_and_clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); + nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &cq->pci_map); + nvmet_cq_put(&cq->nvme_cq); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, + u16 sqid, u16 cqid, u16 flags, u16 qsize, u64 pci_addr) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; + struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; + u16 
status; + + if (test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + if (!(flags & NVME_QUEUE_PHYS_CONTIG)) + return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; + + sq->pci_addr = pci_addr; + sq->qid = sqid; + sq->depth = qsize + 1; + sq->head = 0; + sq->tail = 0; + sq->phase = 0; + sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32)); + nvmet_pci_epf_bar_write32(ctrl, sq->db, 0); + if (!sqid) + sq->qes = 1UL << NVME_ADM_SQES; + else + sq->qes = ctrl->io_sqes; + sq->pci_size = sq->qes * sq->depth; + + status = nvmet_sq_create(tctrl, &sq->nvme_sq, &cq->nvme_cq, sqid, + sq->depth); + if (status != NVME_SC_SUCCESS) + return status; + + sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND, + min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid); + if (!sq->iod_wq) { + dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid); + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + goto out_destroy_sq; + } + + set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); + + dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n", + sqid, qsize, sq->qes); + + return NVME_SC_SUCCESS; + +out_destroy_sq: + nvmet_sq_destroy(&sq->nvme_sq); + return status; +} + +static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; + + if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + destroy_workqueue(sq->iod_wq); + sq->iod_wq = NULL; + + nvmet_pci_epf_drain_queue(sq); + + if (sq->nvme_sq.ctrl) + nvmet_sq_destroy(&sq->nvme_sq); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl, + u8 feat, void *data) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_feat_arbitration *arb; + struct nvmet_feat_irq_coalesce *irqc; + struct nvmet_feat_irq_config *irqcfg; + struct nvmet_pci_epf_irq_vector *iv; + u16 status; + + switch (feat) { + case NVME_FEAT_ARBITRATION: + arb = data; + if (!ctrl->sq_ab) + arb->ab = 0x7; + else + arb->ab = ilog2(ctrl->sq_ab); + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_COALESCE: + irqc = data; + irqc->thr = ctrl->irq_vector_threshold; + irqc->time = 0; + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_CONFIG: + irqcfg = data; + mutex_lock(&ctrl->irq_lock); + iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); + if (iv) { + irqcfg->cd = iv->cd; + status = NVME_SC_SUCCESS; + } else { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + mutex_unlock(&ctrl->irq_lock); + return status; + + default: + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } +} + +static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl, + u8 feat, void *data) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_feat_arbitration *arb; + struct nvmet_feat_irq_coalesce *irqc; + struct nvmet_feat_irq_config *irqcfg; + struct nvmet_pci_epf_irq_vector *iv; + u16 status; + + switch (feat) { + case NVME_FEAT_ARBITRATION: + arb = data; + if (arb->ab == 0x7) + ctrl->sq_ab = 0; + else + ctrl->sq_ab = 1 << arb->ab; + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_COALESCE: + /* + * Since we do not implement precise IRQ coalescing timing, + * ignore the time field. 
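+ *
+ * The Aggregation Threshold (thr) is a 0's based value, hence the
+ * "+ 1" below: for example, thr == 9 requests an interrupt only once
+ * 10 completion entries have been posted on a vector.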
+ */ + irqc = data; + ctrl->irq_vector_threshold = irqc->thr + 1; + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_CONFIG: + irqcfg = data; + mutex_lock(&ctrl->irq_lock); + iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); + if (iv) { + iv->cd = irqcfg->cd; + status = NVME_SC_SUCCESS; + } else { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + mutex_unlock(&ctrl->irq_lock); + return status; + + default: + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } +} + +static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = { + .owner = THIS_MODULE, + .type = NVMF_TRTYPE_PCI, + .add_port = nvmet_pci_epf_add_port, + .remove_port = nvmet_pci_epf_remove_port, + .queue_response = nvmet_pci_epf_queue_response, + .get_mdts = nvmet_pci_epf_get_mdts, + .create_cq = nvmet_pci_epf_create_cq, + .delete_cq = nvmet_pci_epf_delete_cq, + .create_sq = nvmet_pci_epf_create_sq, + .delete_sq = nvmet_pci_epf_delete_sq, + .get_feature = nvmet_pci_epf_get_feat, + .set_feature = nvmet_pci_epf_set_feat, +}; + +static void nvmet_pci_epf_cq_work(struct work_struct *work); + +static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, + unsigned int qid, bool sq) +{ + struct nvmet_pci_epf_queue *queue; + + if (sq) { + queue = &ctrl->sq[qid]; + } else { + queue = &ctrl->cq[qid]; + INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); + } + queue->ctrl = ctrl; + queue->qid = qid; + spin_lock_init(&queue->lock); + INIT_LIST_HEAD(&queue->list); +} + +static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl) +{ + unsigned int qid; + + ctrl->sq = kcalloc(ctrl->nr_queues, + sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); + if (!ctrl->sq) + return -ENOMEM; + + ctrl->cq = kcalloc(ctrl->nr_queues, + sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); + if (!ctrl->cq) { + kfree(ctrl->sq); + ctrl->sq = NULL; + return -ENOMEM; + } + + for (qid = 0; qid < ctrl->nr_queues; qid++) { + nvmet_pci_epf_init_queue(ctrl, qid, true); + nvmet_pci_epf_init_queue(ctrl, qid, false); + } + + return 0; +} + +static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl) +{ + kfree(ctrl->sq); + ctrl->sq = NULL; + kfree(ctrl->cq); + ctrl->cq = NULL; +} + +static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) +{ + struct nvmet_pci_epf_iod *iod = + container_of(work, struct nvmet_pci_epf_iod, work); + struct nvmet_req *req = &iod->req; + int ret; + + if (!iod->ctrl->link_up) { + nvmet_pci_epf_free_iod(iod); + return; + } + + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) { + iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + /* + * If nvmet_req_init() fails (e.g., unsupported opcode) it will call + * __nvmet_req_complete() internally which will call + * nvmet_pci_epf_queue_response() and will complete the command directly. + */ + if (!nvmet_req_init(req, &iod->sq->nvme_sq, &nvmet_pci_epf_fabrics_ops)) + return; + + iod->data_len = nvmet_req_transfer_len(req); + if (iod->data_len) { + /* + * Get the data DMA transfer direction. Here "device" means the + * PCI root-complex host. + */ + if (nvme_is_write(&iod->cmd)) + iod->dma_dir = DMA_FROM_DEVICE; + else + iod->dma_dir = DMA_TO_DEVICE; + + /* + * Setup the command data buffer and get the command data from + * the host if needed. 
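+ *
+ * Write commands (DMA_FROM_DEVICE) must have their data transferred
+ * from the host before req->execute() is called. For read commands
+ * (DMA_TO_DEVICE), the data is transferred back to the host only
+ * after execution completes (see the wait_for_completion() below).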
+ */ + ret = nvmet_pci_epf_alloc_iod_data_buf(iod); + if (!ret && iod->dma_dir == DMA_FROM_DEVICE) + ret = nvmet_pci_epf_transfer_iod_data(iod); + if (ret) { + nvmet_req_uninit(req); + goto complete; + } + } + + req->execute(req); + + /* + * If we do not have data to transfer after the command execution + * finishes, nvmet_pci_epf_queue_response() will complete the command + * directly. No need to wait for the completion in this case. + */ + if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) + return; + + wait_for_completion(&iod->done); + + if (iod->status != NVME_SC_SUCCESS) + return; + + WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE); + nvmet_pci_epf_transfer_iod_data(iod); + +complete: + nvmet_pci_epf_complete_iod(iod); +} + +static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *sq) +{ + struct nvmet_pci_epf_iod *iod; + int ret, n = 0; + u16 head = sq->head; + + sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); + while (head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) { + iod = nvmet_pci_epf_alloc_iod(sq); + if (!iod) + break; + + /* Get the NVMe command submitted by the host. */ + ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd, + sq->pci_addr + head * sq->qes, + sq->qes, DMA_FROM_DEVICE); + if (ret) { + /* Not much we can do... */ + nvmet_pci_epf_free_iod(iod); + break; + } + + dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n", + sq->qid, head, sq->tail, + nvmet_pci_epf_iod_name(iod)); + + head++; + if (head == sq->depth) + head = 0; + WRITE_ONCE(sq->head, head); + n++; + + queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work); + + sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); + } + + return n; +} + +static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work) +{ + struct nvmet_pci_epf_ctrl *ctrl = + container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work); + struct nvmet_pci_epf_queue *sq; + unsigned long limit = jiffies; + unsigned long last = 0; + int i, nr_sqs; + + while (ctrl->link_up && ctrl->enabled) { + nr_sqs = 0; + /* Do round-robin arbitration. */ + for (i = 0; i < ctrl->nr_queues; i++) { + sq = &ctrl->sq[i]; + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) + continue; + if (nvmet_pci_epf_process_sq(ctrl, sq)) + nr_sqs++; + } + + /* + * If we have been running for a while, reschedule to let other + * tasks run and to avoid RCU stalls. + */ + if (time_is_before_jiffies(limit + secs_to_jiffies(1))) { + cond_resched(); + limit = jiffies; + continue; + } + + if (nr_sqs) { + last = jiffies; + continue; + } + + /* + * If we have not received any command on any queue for more + * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and + * reschedule. This avoids "burning" a CPU when the controller + * is idle for a long time. + */ + if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE)) + break; + + cpu_relax(); + } + + schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL); +} + +static void nvmet_pci_epf_cq_work(struct work_struct *work) +{ + struct nvmet_pci_epf_queue *cq = + container_of(work, struct nvmet_pci_epf_queue, work.work); + struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl; + struct nvme_completion *cqe; + struct nvmet_pci_epf_iod *iod; + unsigned long flags; + int ret = 0, n = 0; + + while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) { + + /* Check that the CQ is not full. 
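+ * The CQ head doorbell in BAR 0 is re-read on each iteration; if the
+ * queue is full, stop with -EAGAIN so that the work item is requeued
+ * and posting is retried after NVMET_PCI_EPF_CQ_RETRY_INTERVAL.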
*/ + cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db); + if (cq->head == cq->tail + 1) { + ret = -EAGAIN; + break; + } + + spin_lock_irqsave(&cq->lock, flags); + iod = list_first_entry_or_null(&cq->list, + struct nvmet_pci_epf_iod, link); + if (iod) + list_del_init(&iod->link); + spin_unlock_irqrestore(&cq->lock, flags); + + if (!iod) + break; + + /* + * Post the IOD completion entry. If the IOD request was + * executed (req->execute() called), the CQE is already + * initialized. However, the IOD may have been failed before + * that, leaving the CQE not properly initialized. So always + * initialize it here. + */ + cqe = &iod->cqe; + cqe->sq_head = cpu_to_le16(READ_ONCE(iod->sq->head)); + cqe->sq_id = cpu_to_le16(iod->sq->qid); + cqe->command_id = iod->cmd.common.command_id; + cqe->status = cpu_to_le16((iod->status << 1) | cq->phase); + + dev_dbg(ctrl->dev, + "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n", + cq->qid, nvmet_pci_epf_iod_name(iod), iod->status, + le64_to_cpu(cqe->result.u64), cq->head, cq->tail, + cq->phase); + + memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes, + cqe, cq->qes); + + cq->tail++; + if (cq->tail >= cq->depth) { + cq->tail = 0; + cq->phase ^= 1; + } + + nvmet_pci_epf_free_iod(iod); + + /* Signal the host. */ + nvmet_pci_epf_raise_irq(ctrl, cq, false); + n++; + } + + /* + * We do not support precise IRQ coalescing time (100ns units as per + * NVMe specifications). So if we have posted completion entries without + * reaching the interrupt coalescing threshold, raise an interrupt. + */ + if (n) + nvmet_pci_epf_raise_irq(ctrl, cq, true); + + if (ret < 0) + queue_delayed_work(system_highpri_wq, &cq->work, + NVMET_PCI_EPF_CQ_RETRY_INTERVAL); +} + +static void nvmet_pci_epf_clear_ctrl_config(struct nvmet_pci_epf_ctrl *ctrl) +{ + struct nvmet_ctrl *tctrl = ctrl->tctrl; + + /* Initialize controller status. */ + tctrl->csts = 0; + ctrl->csts = 0; + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); + + /* Initialize controller configuration and start polling. */ + tctrl->cc = 0; + ctrl->cc = 0; + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); +} + +static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + u64 pci_addr, asq, acq; + u32 aqa; + u16 status, qsize; + + if (ctrl->enabled) + return 0; + + dev_info(ctrl->dev, "Enabling controller\n"); + + ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12; + ctrl->mps = 1UL << ctrl->mps_shift; + ctrl->mps_mask = ctrl->mps - 1; + + ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc); + if (ctrl->io_sqes < sizeof(struct nvme_command)) { + dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n", + ctrl->io_sqes, sizeof(struct nvme_command)); + goto err; + } + + ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); + if (ctrl->io_cqes < sizeof(struct nvme_completion)) { + dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", + ctrl->io_cqes, sizeof(struct nvme_completion)); + goto err; + } + + /* Create the admin queue. 
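+ * The admin queue parameters come from the AQA, ASQ and ACQ registers
+ * written by the host: AQA bits 27:16 hold the admin CQ size and bits
+ * 11:00 the admin SQ size (both 0's based values), while ASQ and ACQ
+ * hold the page-aligned PCI addresses of the two queues. For example,
+ * AQA == 0x001f001f describes a 32-entry admin SQ and a 32-entry
+ * admin CQ.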
*/ + aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA); + asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ); + acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ); + + qsize = (aqa & 0x0fff0000) >> 16; + pci_addr = acq & GENMASK_ULL(63, 12); + status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0, + NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG, + qsize, pci_addr, 0); + if (status != NVME_SC_SUCCESS) { + dev_err(ctrl->dev, "Failed to create admin completion queue\n"); + goto err; + } + + qsize = aqa & 0x00000fff; + pci_addr = asq & GENMASK_ULL(63, 12); + status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, 0, + NVME_QUEUE_PHYS_CONTIG, qsize, pci_addr); + if (status != NVME_SC_SUCCESS) { + dev_err(ctrl->dev, "Failed to create admin submission queue\n"); + nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); + goto err; + } + + ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB; + ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD; + ctrl->enabled = true; + ctrl->csts = NVME_CSTS_RDY; + + /* Start polling the controller SQs. */ + schedule_delayed_work(&ctrl->poll_sqs, 0); + + return 0; + +err: + nvmet_pci_epf_clear_ctrl_config(ctrl); + return -EINVAL; +} + +static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl, + bool shutdown) +{ + int qid; + + if (!ctrl->enabled) + return; + + dev_info(ctrl->dev, "%s controller\n", + shutdown ? "Shutting down" : "Disabling"); + + ctrl->enabled = false; + cancel_delayed_work_sync(&ctrl->poll_sqs); + + /* Delete all I/O queues first. */ + for (qid = 1; qid < ctrl->nr_queues; qid++) + nvmet_pci_epf_delete_sq(ctrl->tctrl, qid); + + for (qid = 1; qid < ctrl->nr_queues; qid++) + nvmet_pci_epf_delete_cq(ctrl->tctrl, qid); + + /* Delete the admin queue last. */ + nvmet_pci_epf_delete_sq(ctrl->tctrl, 0); + nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); + + ctrl->csts &= ~NVME_CSTS_RDY; + if (shutdown) { + ctrl->csts |= NVME_CSTS_SHST_CMPLT; + ctrl->cc &= ~NVME_CC_ENABLE; + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); + } +} + +static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) +{ + struct nvmet_pci_epf_ctrl *ctrl = + container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work); + u32 old_cc, new_cc; + int ret; + + if (!ctrl->tctrl) + return; + + old_cc = ctrl->cc; + new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC); + if (new_cc == old_cc) + goto reschedule_work; + + ctrl->cc = new_cc; + + if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) { + ret = nvmet_pci_epf_enable_ctrl(ctrl); + if (ret) + goto reschedule_work; + } + + if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) + nvmet_pci_epf_disable_ctrl(ctrl, false); + + if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) + nvmet_pci_epf_disable_ctrl(ctrl, true); + + if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc)) + ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; + + nvmet_update_cc(ctrl->tctrl, ctrl->cc); + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); + +reschedule_work: + schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); +} + +static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl) +{ + struct nvmet_ctrl *tctrl = ctrl->tctrl; + + ctrl->bar = ctrl->nvme_epf->reg_bar; + + /* Copy the target controller capabilities as a base. */ + ctrl->cap = tctrl->cap; + + /* Contiguous Queues Required (CQR). */ + ctrl->cap |= 0x1ULL << 16; + + /* Set Doorbell stride to 4B (DSTRB). */ + ctrl->cap &= ~GENMASK_ULL(35, 32); + + /* Clear NVM Subsystem Reset Supported (NSSRS). */ + ctrl->cap &= ~(0x1ULL << 36); + + /* Clear Boot Partition Support (BPS). 
*/ + ctrl->cap &= ~(0x1ULL << 45); + + /* Clear Persistent Memory Region Supported (PMRS). */ + ctrl->cap &= ~(0x1ULL << 56); + + /* Clear Controller Memory Buffer Supported (CMBS). */ + ctrl->cap &= ~(0x1ULL << 57); + + nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap); + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver); + + nvmet_pci_epf_clear_ctrl_config(ctrl); +} + +static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf, + unsigned int max_nr_queues) +{ + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + struct nvmet_alloc_ctrl_args args = {}; + char hostnqn[NVMF_NQN_SIZE]; + uuid_t id; + int ret; + + memset(ctrl, 0, sizeof(*ctrl)); + ctrl->dev = &nvme_epf->epf->dev; + mutex_init(&ctrl->irq_lock); + ctrl->nvme_epf = nvme_epf; + ctrl->mdts = nvme_epf->mdts_kb * SZ_1K; + INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work); + INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work); + + ret = mempool_init_kmalloc_pool(&ctrl->iod_pool, + max_nr_queues * NVMET_MAX_QUEUE_SIZE, + sizeof(struct nvmet_pci_epf_iod)); + if (ret) { + dev_err(ctrl->dev, "Failed to initialize IOD mempool\n"); + return ret; + } + + ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid); + if (!ctrl->port) { + dev_err(ctrl->dev, "Port not found\n"); + ret = -EINVAL; + goto out_mempool_exit; + } + + /* Create the target controller. */ + uuid_gen(&id); + snprintf(hostnqn, NVMF_NQN_SIZE, + "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); + args.port = ctrl->port; + args.subsysnqn = nvme_epf->subsysnqn; + memset(&id, 0, sizeof(uuid_t)); + args.hostid = &id; + args.hostnqn = hostnqn; + args.ops = &nvmet_pci_epf_fabrics_ops; + + ctrl->tctrl = nvmet_alloc_ctrl(&args); + if (!ctrl->tctrl) { + dev_err(ctrl->dev, "Failed to create target controller\n"); + ret = -ENOMEM; + goto out_mempool_exit; + } + ctrl->tctrl->drvdata = ctrl; + + /* We do not support protection information for now. */ + if (ctrl->tctrl->pi_support) { + dev_err(ctrl->dev, + "Protection information (PI) is not supported\n"); + ret = -ENOTSUPP; + goto out_put_ctrl; + } + + /* Allocate our queues, up to the maximum number. */ + ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues); + ret = nvmet_pci_epf_alloc_queues(ctrl); + if (ret) + goto out_put_ctrl; + + /* + * Allocate the IRQ vectors descriptors. We cannot have more than the + * maximum number of queues. + */ + ret = nvmet_pci_epf_alloc_irq_vectors(ctrl); + if (ret) + goto out_free_queues; + + dev_info(ctrl->dev, + "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n", + ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1, + ctrl->mdts); + + /* Initialize BAR 0 using the target controller CAP. 
*/ + nvmet_pci_epf_init_bar(ctrl); + + return 0; + +out_free_queues: + nvmet_pci_epf_free_queues(ctrl); +out_put_ctrl: + nvmet_ctrl_put(ctrl->tctrl); + ctrl->tctrl = NULL; +out_mempool_exit: + mempool_exit(&ctrl->iod_pool); + return ret; +} + +static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + + dev_info(ctrl->dev, "PCI link up\n"); + ctrl->link_up = true; + + schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); +} + +static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + dev_info(ctrl->dev, "PCI link down\n"); + ctrl->link_up = false; + + cancel_delayed_work_sync(&ctrl->poll_cc); + + nvmet_pci_epf_disable_ctrl(ctrl, false); + nvmet_pci_epf_clear_ctrl_config(ctrl); +} + +static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + if (!ctrl->tctrl) + return; + + dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n", + ctrl->tctrl->subsys->subsysnqn); + + nvmet_pci_epf_stop_ctrl(ctrl); + + nvmet_pci_epf_free_queues(ctrl); + nvmet_pci_epf_free_irq_vectors(ctrl); + + nvmet_ctrl_put(ctrl->tctrl); + ctrl->tctrl = NULL; + + mempool_exit(&ctrl->iod_pool); +} + +static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + const struct pci_epc_features *epc_features = nvme_epf->epc_features; + size_t reg_size, reg_bar_size; + size_t msix_table_size = 0; + + /* + * The first free BAR will be our register BAR and per NVMe + * specifications, it must be BAR 0. + */ + if (pci_epc_get_first_free_bar(epc_features) != BAR_0) { + dev_err(&epf->dev, "BAR 0 is not free\n"); + return -ENODEV; + } + + /* + * While NVMe PCIe Transport Specification 1.1, section 2.1.10, claims + * that the BAR0 type is Implementation Specific, in NVMe 1.1, the type + * is required to be 64-bit. Thus, for interoperability, always set the + * type to 64-bit. In the rare case that the PCI EPC does not support + * configuring BAR0 as 64-bit, the call to pci_epc_set_bar() will fail, + * and we will return failure back to the user. + */ + epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + + /* + * Calculate the size of the register bar: NVMe registers first with + * enough space for the doorbells, followed by the MSI-X table + * if supported. 
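+ *
+ * With a 4 B doorbell stride, each queue pair uses two 32-bit
+ * doorbells (SQ tail and CQ head) starting at offset NVME_REG_DBS,
+ * so the doorbell area takes NVMET_NR_QUEUES * 2 * 4 bytes; for
+ * example, 128 queue pairs need 1 KiB of doorbell space on top of
+ * the fixed register block.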
+ */ + reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32)); + reg_size = ALIGN(reg_size, 8); + + if (epc_features->msix_capable) { + size_t pba_size; + + msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; + nvme_epf->msix_table_offset = reg_size; + pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); + + reg_size += msix_table_size + pba_size; + } + + if (epc_features->bar[BAR_0].type == BAR_FIXED) { + if (reg_size > epc_features->bar[BAR_0].fixed_size) { + dev_err(&epf->dev, + "BAR 0 size %llu B too small, need %zu B\n", + epc_features->bar[BAR_0].fixed_size, + reg_size); + return -ENOMEM; + } + reg_bar_size = epc_features->bar[BAR_0].fixed_size; + } else { + reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096)); + } + + nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0, + epc_features, PRIMARY_INTERFACE); + if (!nvme_epf->reg_bar) { + dev_err(&epf->dev, "Failed to allocate BAR 0\n"); + return -ENOMEM; + } + memset(nvme_epf->reg_bar, 0, reg_bar_size); + + return 0; +} + +static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + + if (!nvme_epf->reg_bar) + return; + + pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE); + nvme_epf->reg_bar = NULL; +} + +static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + + pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no, + &epf->bar[BAR_0]); +} + +static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf) +{ + const struct pci_epc_features *epc_features = nvme_epf->epc_features; + struct pci_epf *epf = nvme_epf->epf; + int ret; + + /* Enable MSI-X if supported, otherwise, use MSI. */ + if (epc_features->msix_capable && epf->msix_interrupts) { + ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no, + epf->msix_interrupts, BAR_0, + nvme_epf->msix_table_offset); + if (ret) { + dev_err(&epf->dev, "Failed to configure MSI-X\n"); + return ret; + } + + nvme_epf->nr_vectors = epf->msix_interrupts; + nvme_epf->irq_type = PCI_IRQ_MSIX; + + return 0; + } + + if (epc_features->msi_capable && epf->msi_interrupts) { + ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no, + epf->msi_interrupts); + if (ret) { + dev_err(&epf->dev, "Failed to configure MSI\n"); + return ret; + } + + nvme_epf->nr_vectors = epf->msi_interrupts; + nvme_epf->irq_type = PCI_IRQ_MSI; + + return 0; + } + + /* MSI and MSI-X are not supported: fall back to INTx. */ + nvme_epf->nr_vectors = 1; + nvme_epf->irq_type = PCI_IRQ_INTX; + + return 0; +} + +static int nvmet_pci_epf_epc_init(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features = nvme_epf->epc_features; + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + unsigned int max_nr_queues = NVMET_NR_QUEUES; + int ret; + + /* For now, do not support virtual functions. */ + if (epf->vfunc_no > 0) { + dev_err(&epf->dev, "Virtual functions are not supported\n"); + return -EINVAL; + } + + /* + * Cap the maximum number of queues we can support on the controller + * with the number of IRQs we can use. 
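+ *
+ * The number of queue pairs is capped by the number of available
+ * interrupt vectors, and at least two queue pairs (the admin queue
+ * plus one I/O queue) are needed, which is checked right after the
+ * capping below.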
+ */ + if (epc_features->msix_capable && epf->msix_interrupts) { + dev_info(&epf->dev, + "PCI endpoint controller supports MSI-X, %u vectors\n", + epf->msix_interrupts); + max_nr_queues = min(max_nr_queues, epf->msix_interrupts); + } else if (epc_features->msi_capable && epf->msi_interrupts) { + dev_info(&epf->dev, + "PCI endpoint controller supports MSI, %u vectors\n", + epf->msi_interrupts); + max_nr_queues = min(max_nr_queues, epf->msi_interrupts); + } + + if (max_nr_queues < 2) { + dev_err(&epf->dev, "Invalid maximum number of queues %u\n", + max_nr_queues); + return -EINVAL; + } + + /* Create the target controller. */ + ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues); + if (ret) { + dev_err(&epf->dev, + "Failed to create NVMe PCI target controller (err=%d)\n", + ret); + return ret; + } + + nvmet_pci_epf_init_dma(nvme_epf); + + /* Set device ID, class, etc. */ + epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; + epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; + ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no, + epf->header); + if (ret) { + dev_err(&epf->dev, + "Failed to write configuration header (err=%d)\n", ret); + goto out_destroy_ctrl; + } + + ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, + &epf->bar[BAR_0]); + if (ret) { + dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret); + goto out_destroy_ctrl; + } + + /* + * Enable interrupts and start polling the controller BAR if we do not + * have a link up notifier. + */ + ret = nvmet_pci_epf_init_irq(nvme_epf); + if (ret) + goto out_clear_bar; + + if (!epc_features->linkup_notifier) + nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl); + + return 0; + +out_clear_bar: + nvmet_pci_epf_clear_bar(nvme_epf); +out_destroy_ctrl: + nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); + return ret; +} + +static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + + nvmet_pci_epf_destroy_ctrl(ctrl); + + nvmet_pci_epf_deinit_dma(nvme_epf); + nvmet_pci_epf_clear_bar(nvme_epf); +} + +static int nvmet_pci_epf_link_up(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + + nvmet_pci_epf_start_ctrl(ctrl); + + return 0; +} + +static int nvmet_pci_epf_link_down(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + + nvmet_pci_epf_stop_ctrl(ctrl); + + return 0; +} + +static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = { + .epc_init = nvmet_pci_epf_epc_init, + .epc_deinit = nvmet_pci_epf_epc_deinit, + .link_up = nvmet_pci_epf_link_up, + .link_down = nvmet_pci_epf_link_down, +}; + +static int nvmet_pci_epf_bind(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features; + struct pci_epc *epc = epf->epc; + int ret; + + if (WARN_ON_ONCE(!epc)) + return -EINVAL; + + epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); + if (!epc_features) { + dev_err(&epf->dev, "epc_features not implemented\n"); + return -EOPNOTSUPP; + } + nvme_epf->epc_features = epc_features; + + ret = nvmet_pci_epf_configure_bar(nvme_epf); + if (ret) + return ret; + + return 0; +} + +static void nvmet_pci_epf_unbind(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct pci_epc *epc = epf->epc; + + 
nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); + + if (epc->init_complete) { + nvmet_pci_epf_deinit_dma(nvme_epf); + nvmet_pci_epf_clear_bar(nvme_epf); + } + + nvmet_pci_epf_free_bar(nvme_epf); +} + +static struct pci_epf_header nvme_epf_pci_header = { + .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .progif_code = 0x02, /* NVM Express */ + .baseclass_code = PCI_BASE_CLASS_STORAGE, + .subclass_code = 0x08, /* Non-Volatile Memory controller */ + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +static int nvmet_pci_epf_probe(struct pci_epf *epf, + const struct pci_epf_device_id *id) +{ + struct nvmet_pci_epf *nvme_epf; + int ret; + + nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL); + if (!nvme_epf) + return -ENOMEM; + + ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock); + if (ret) + return ret; + + nvme_epf->epf = epf; + nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB; + + epf->event_ops = &nvmet_pci_epf_event_ops; + epf->header = &nvme_epf_pci_header; + epf_set_drvdata(epf, nvme_epf); + + return 0; +} + +#define to_nvme_epf(epf_group) \ + container_of(epf_group, struct nvmet_pci_epf, group) + +static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid)); +} + +static ssize_t nvmet_pci_epf_portid_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + u16 portid; + + /* Do not allow setting this when the function is already started. */ + if (nvme_epf->ctrl.tctrl) + return -EBUSY; + + if (!len) + return -EINVAL; + + if (kstrtou16(page, 0, &portid)) + return -EINVAL; + + nvme_epf->portid = cpu_to_le16(portid); + + return len; +} + +CONFIGFS_ATTR(nvmet_pci_epf_, portid); + +static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item, + char *page) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn); +} + +static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + /* Do not allow setting this when the function is already started. 
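+ * The target controller (ctrl.tctrl) is created when the endpoint
+ * controller is initialized, so the subsystem NQN can only be changed
+ * while no controller exists, that is, before the function is
+ * started.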
*/ + if (nvme_epf->ctrl.tctrl) + return -EBUSY; + + if (!len) + return -EINVAL; + + strscpy(nvme_epf->subsysnqn, page, len); + + return len; +} + +CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn); + +static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb); +} + +static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + unsigned long mdts_kb; + int ret; + + if (nvme_epf->ctrl.tctrl) + return -EBUSY; + + ret = kstrtoul(page, 0, &mdts_kb); + if (ret) + return ret; + if (!mdts_kb) + mdts_kb = NVMET_PCI_EPF_MDTS_KB; + else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB) + mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB; + + if (!is_power_of_2(mdts_kb)) + return -EINVAL; + + nvme_epf->mdts_kb = mdts_kb; + + return len; +} + +CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb); + +static struct configfs_attribute *nvmet_pci_epf_attrs[] = { + &nvmet_pci_epf_attr_portid, + &nvmet_pci_epf_attr_subsysnqn, + &nvmet_pci_epf_attr_mdts_kb, + NULL, +}; + +static const struct config_item_type nvmet_pci_epf_group_type = { + .ct_attrs = nvmet_pci_epf_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf, + struct config_group *group) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + + config_group_init_type_name(&nvme_epf->group, "nvme", + &nvmet_pci_epf_group_type); + + return &nvme_epf->group; +} + +static const struct pci_epf_device_id nvmet_pci_epf_ids[] = { + { .name = "nvmet_pci_epf" }, + {}, +}; + +static struct pci_epf_ops nvmet_pci_epf_ops = { + .bind = nvmet_pci_epf_bind, + .unbind = nvmet_pci_epf_unbind, + .add_cfs = nvmet_pci_epf_add_cfs, +}; + +static struct pci_epf_driver nvmet_pci_epf_driver = { + .driver.name = "nvmet_pci_epf", + .probe = nvmet_pci_epf_probe, + .id_table = nvmet_pci_epf_ids, + .ops = &nvmet_pci_epf_ops, + .owner = THIS_MODULE, +}; + +static int __init nvmet_pci_epf_init_module(void) +{ + int ret; + + ret = pci_epf_register_driver(&nvmet_pci_epf_driver); + if (ret) + return ret; + + ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops); + if (ret) { + pci_epf_unregister_driver(&nvmet_pci_epf_driver); + return ret; + } + + return 0; +} + +static void __exit nvmet_pci_epf_cleanup_module(void) +{ + nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops); + pci_epf_unregister_driver(&nvmet_pci_epf_driver); +} + +module_init(nvmet_pci_epf_init_module); +module_exit(nvmet_pci_epf_cleanup_module); + +MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver"); +MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/nvme/target/pr.c b/drivers/nvme/target/pr.c new file mode 100644 index 000000000000..cd22d8333314 --- /dev/null +++ b/drivers/nvme/target/pr.c @@ -0,0 +1,1155 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe over Fabrics Persist Reservation. + * Copyright (c) 2024 Guixin Liu, Alibaba Group. + * All rights reserved. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/unaligned.h> +#include "nvmet.h" + +#define NVMET_PR_NOTIFI_MASK_ALL \ + (1 << NVME_PR_NOTIFY_BIT_REG_PREEMPTED | \ + 1 << NVME_PR_NOTIFY_BIT_RESV_RELEASED | \ + 1 << NVME_PR_NOTIFY_BIT_RESV_PREEMPTED) + +static inline bool nvmet_pr_parse_ignore_key(u32 cdw10) +{ + /* Ignore existing key, bit 03. */ + return (cdw10 >> 3) & 1; +} + +static inline struct nvmet_ns *nvmet_pr_to_ns(struct nvmet_pr *pr) +{ + return container_of(pr, struct nvmet_ns, pr); +} + +static struct nvmet_pr_registrant * +nvmet_pr_find_registrant(struct nvmet_pr *pr, uuid_t *hostid) +{ + struct nvmet_pr_registrant *reg; + + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, hostid)) + return reg; + } + return NULL; +} + +u16 nvmet_set_feat_resv_notif_mask(struct nvmet_req *req, u32 mask) +{ + u32 nsid = le32_to_cpu(req->cmd->common.nsid); + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_ns *ns; + unsigned long idx; + u16 status; + + if (mask & ~(NVMET_PR_NOTIFI_MASK_ALL)) { + req->error_loc = offsetof(struct nvme_common_command, cdw11); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + if (nsid != U32_MAX) { + status = nvmet_req_find_ns(req); + if (status) + return status; + if (!req->ns->pr.enable) + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + + WRITE_ONCE(req->ns->pr.notify_mask, mask); + goto success; + } + + nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) { + if (ns->pr.enable) + WRITE_ONCE(ns->pr.notify_mask, mask); + } + +success: + nvmet_set_result(req, mask); + return NVME_SC_SUCCESS; +} + +u16 nvmet_get_feat_resv_notif_mask(struct nvmet_req *req) +{ + u16 status; + + status = nvmet_req_find_ns(req); + if (status) + return status; + + if (!req->ns->pr.enable) + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + + nvmet_set_result(req, READ_ONCE(req->ns->pr.notify_mask)); + return status; +} + +void nvmet_execute_get_log_page_resv(struct nvmet_req *req) +{ + struct nvmet_pr_log_mgr *log_mgr = &req->sq->ctrl->pr_log_mgr; + struct nvme_pr_log next_log = {0}; + struct nvme_pr_log log = {0}; + u16 status = NVME_SC_SUCCESS; + u64 lost_count; + u64 cur_count; + u64 next_count; + + mutex_lock(&log_mgr->lock); + if (!kfifo_get(&log_mgr->log_queue, &log)) + goto out; + + /* + * We can't get the last in kfifo. + * Utilize the current count and the count from the next log to + * calculate the number of lost logs, while also addressing cases + * of overflow. If there is no subsequent log, the number of lost + * logs is equal to the lost_count within the nvmet_pr_log_mgr. + */ + cur_count = le64_to_cpu(log.count); + if (kfifo_peek(&log_mgr->log_queue, &next_log)) { + next_count = le64_to_cpu(next_log.count); + if (next_count > cur_count) + lost_count = next_count - cur_count - 1; + else + lost_count = U64_MAX - cur_count + next_count - 1; + } else { + lost_count = log_mgr->lost_count; + } + + log.count = cpu_to_le64((cur_count + lost_count) == 0 ? 
+ 1 : (cur_count + lost_count)); + log_mgr->lost_count -= lost_count; + + log.nr_pages = kfifo_len(&log_mgr->log_queue); + +out: + status = nvmet_copy_to_sgl(req, 0, &log, sizeof(log)); + mutex_unlock(&log_mgr->lock); + nvmet_req_complete(req, status); +} + +static void nvmet_pr_add_resv_log(struct nvmet_ctrl *ctrl, u8 log_type, + u32 nsid) +{ + struct nvmet_pr_log_mgr *log_mgr = &ctrl->pr_log_mgr; + struct nvme_pr_log log = {0}; + + mutex_lock(&log_mgr->lock); + log_mgr->counter++; + if (log_mgr->counter == 0) + log_mgr->counter = 1; + + log.count = cpu_to_le64(log_mgr->counter); + log.type = log_type; + log.nsid = cpu_to_le32(nsid); + + if (!kfifo_put(&log_mgr->log_queue, log)) { + pr_info("a reservation log lost, cntlid:%d, log_type:%d, nsid:%d\n", + ctrl->cntlid, log_type, nsid); + log_mgr->lost_count++; + } + + mutex_unlock(&log_mgr->lock); +} + +static void nvmet_pr_resv_released(struct nvmet_pr *pr, uuid_t *hostid) +{ + struct nvmet_ns *ns = nvmet_pr_to_ns(pr); + struct nvmet_subsys *subsys = ns->subsys; + struct nvmet_ctrl *ctrl; + + if (test_bit(NVME_PR_NOTIFY_BIT_RESV_RELEASED, &pr->notify_mask)) + return; + + mutex_lock(&subsys->lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { + if (!uuid_equal(&ctrl->hostid, hostid) && + nvmet_pr_find_registrant(pr, &ctrl->hostid)) { + nvmet_pr_add_resv_log(ctrl, + NVME_PR_LOG_RESERVATION_RELEASED, ns->nsid); + nvmet_add_async_event(ctrl, NVME_AER_CSS, + NVME_AEN_RESV_LOG_PAGE_AVALIABLE, + NVME_LOG_RESERVATION); + } + } + mutex_unlock(&subsys->lock); +} + +static void nvmet_pr_send_event_to_host(struct nvmet_pr *pr, uuid_t *hostid, + u8 log_type) +{ + struct nvmet_ns *ns = nvmet_pr_to_ns(pr); + struct nvmet_subsys *subsys = ns->subsys; + struct nvmet_ctrl *ctrl; + + mutex_lock(&subsys->lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { + if (uuid_equal(hostid, &ctrl->hostid)) { + nvmet_pr_add_resv_log(ctrl, log_type, ns->nsid); + nvmet_add_async_event(ctrl, NVME_AER_CSS, + NVME_AEN_RESV_LOG_PAGE_AVALIABLE, + NVME_LOG_RESERVATION); + } + } + mutex_unlock(&subsys->lock); +} + +static void nvmet_pr_resv_preempted(struct nvmet_pr *pr, uuid_t *hostid) +{ + if (test_bit(NVME_PR_NOTIFY_BIT_RESV_PREEMPTED, &pr->notify_mask)) + return; + + nvmet_pr_send_event_to_host(pr, hostid, + NVME_PR_LOG_RESERVATOIN_PREEMPTED); +} + +static void nvmet_pr_registration_preempted(struct nvmet_pr *pr, + uuid_t *hostid) +{ + if (test_bit(NVME_PR_NOTIFY_BIT_REG_PREEMPTED, &pr->notify_mask)) + return; + + nvmet_pr_send_event_to_host(pr, hostid, + NVME_PR_LOG_REGISTRATION_PREEMPTED); +} + +static inline void nvmet_pr_set_new_holder(struct nvmet_pr *pr, u8 new_rtype, + struct nvmet_pr_registrant *reg) +{ + reg->rtype = new_rtype; + rcu_assign_pointer(pr->holder, reg); +} + +static u16 nvmet_pr_register(struct nvmet_req *req, + struct nvmet_pr_register_data *d) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr_registrant *new, *reg; + struct nvmet_pr *pr = &req->ns->pr; + u16 status = NVME_SC_SUCCESS; + u64 nrkey = le64_to_cpu(d->nrkey); + + new = kmalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NVME_SC_INTERNAL; + + down(&pr->pr_sem); + reg = nvmet_pr_find_registrant(pr, &ctrl->hostid); + if (reg) { + if (reg->rkey != nrkey) + status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + kfree(new); + goto out; + } + + memset(new, 0, sizeof(*new)); + INIT_LIST_HEAD(&new->entry); + new->rkey = nrkey; + uuid_copy(&new->hostid, &ctrl->hostid); + list_add_tail_rcu(&new->entry, &pr->registrant_list); + +out: + up(&pr->pr_sem); 
+ return status; +} + +static void nvmet_pr_unregister_one(struct nvmet_pr *pr, + struct nvmet_pr_registrant *reg) +{ + struct nvmet_pr_registrant *first_reg; + struct nvmet_pr_registrant *holder; + u8 original_rtype; + + list_del_rcu(®->entry); + + holder = rcu_dereference_protected(pr->holder, 1); + if (reg != holder) + goto out; + + original_rtype = holder->rtype; + if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS || + original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) { + first_reg = list_first_or_null_rcu(&pr->registrant_list, + struct nvmet_pr_registrant, entry); + if (first_reg) + first_reg->rtype = original_rtype; + rcu_assign_pointer(pr->holder, first_reg); + } else { + rcu_assign_pointer(pr->holder, NULL); + + if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_REG_ONLY || + original_rtype == NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY) + nvmet_pr_resv_released(pr, ®->hostid); + } +out: + kfree_rcu(reg, rcu); +} + +static u16 nvmet_pr_unregister(struct nvmet_req *req, + struct nvmet_pr_register_data *d, + bool ignore_key) +{ + u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *reg; + + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid)) { + if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) { + status = NVME_SC_SUCCESS; + nvmet_pr_unregister_one(pr, reg); + } + break; + } + } + up(&pr->pr_sem); + + return status; +} + +static void nvmet_pr_update_reg_rkey(struct nvmet_pr_registrant *reg, + void *attr) +{ + reg->rkey = *(u64 *)attr; +} + +static u16 nvmet_pr_update_reg_attr(struct nvmet_pr *pr, + struct nvmet_pr_registrant *reg, + void (*change_attr)(struct nvmet_pr_registrant *reg, + void *attr), + void *attr) +{ + struct nvmet_pr_registrant *holder; + struct nvmet_pr_registrant *new; + + holder = rcu_dereference_protected(pr->holder, 1); + if (reg != holder) { + change_attr(reg, attr); + return NVME_SC_SUCCESS; + } + + new = kmalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + return NVME_SC_INTERNAL; + + new->rkey = holder->rkey; + new->rtype = holder->rtype; + uuid_copy(&new->hostid, &holder->hostid); + INIT_LIST_HEAD(&new->entry); + + change_attr(new, attr); + list_replace_rcu(&holder->entry, &new->entry); + rcu_assign_pointer(pr->holder, new); + kfree_rcu(holder, rcu); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_pr_replace(struct nvmet_req *req, + struct nvmet_pr_register_data *d, + bool ignore_key) +{ + u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *reg; + u64 nrkey = le64_to_cpu(d->nrkey); + + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid)) { + if (ignore_key || reg->rkey == le64_to_cpu(d->crkey)) + status = nvmet_pr_update_reg_attr(pr, reg, + nvmet_pr_update_reg_rkey, + &nrkey); + break; + } + } + up(&pr->pr_sem); + return status; +} + +static void nvmet_execute_pr_register(struct nvmet_req *req) +{ + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + bool ignore_key = nvmet_pr_parse_ignore_key(cdw10); + struct nvmet_pr_register_data *d; + u8 reg_act = cdw10 & 0x07; /* Reservation Register Action, bit 02:00 */ + u16 status; + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto out; + } + + status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if 
(status) + goto free_data; + + switch (reg_act) { + case NVME_PR_REGISTER_ACT_REG: + status = nvmet_pr_register(req, d); + break; + case NVME_PR_REGISTER_ACT_UNREG: + status = nvmet_pr_unregister(req, d, ignore_key); + break; + case NVME_PR_REGISTER_ACT_REPLACE: + status = nvmet_pr_replace(req, d, ignore_key); + break; + default: + req->error_loc = offsetof(struct nvme_common_command, cdw10); + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + break; + } +free_data: + kfree(d); +out: + if (!status) + atomic_inc(&req->ns->pr.generation); + nvmet_req_complete(req, status); +} + +static u16 nvmet_pr_acquire(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 rtype) +{ + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *holder; + + holder = rcu_dereference_protected(pr->holder, 1); + if (holder && reg != holder) + return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + if (holder && reg == holder) { + if (holder->rtype == rtype) + return NVME_SC_SUCCESS; + return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + } + + nvmet_pr_set_new_holder(pr, rtype, reg); + return NVME_SC_SUCCESS; +} + +static void nvmet_pr_confirm_ns_pc_ref(struct percpu_ref *ref) +{ + struct nvmet_pr_per_ctrl_ref *pc_ref = + container_of(ref, struct nvmet_pr_per_ctrl_ref, ref); + + complete(&pc_ref->confirm_done); +} + +static void nvmet_pr_set_ctrl_to_abort(struct nvmet_req *req, uuid_t *hostid) +{ + struct nvmet_pr_per_ctrl_ref *pc_ref; + struct nvmet_ns *ns = req->ns; + unsigned long idx; + + xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) { + if (uuid_equal(&pc_ref->hostid, hostid)) { + percpu_ref_kill_and_confirm(&pc_ref->ref, + nvmet_pr_confirm_ns_pc_ref); + wait_for_completion(&pc_ref->confirm_done); + } + } +} + +static u16 nvmet_pr_unreg_all_host_by_prkey(struct nvmet_req *req, u64 prkey, + uuid_t *send_hostid, + bool abort) +{ + u16 status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + uuid_t hostid; + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + if (reg->rkey == prkey) { + status = NVME_SC_SUCCESS; + uuid_copy(&hostid, ®->hostid); + if (abort) + nvmet_pr_set_ctrl_to_abort(req, &hostid); + nvmet_pr_unregister_one(pr, reg); + if (!uuid_equal(&hostid, send_hostid)) + nvmet_pr_registration_preempted(pr, &hostid); + } + } + return status; +} + +static void nvmet_pr_unreg_all_others_by_prkey(struct nvmet_req *req, + u64 prkey, + uuid_t *send_hostid, + bool abort) +{ + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + uuid_t hostid; + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + if (reg->rkey == prkey && + !uuid_equal(®->hostid, send_hostid)) { + uuid_copy(&hostid, ®->hostid); + if (abort) + nvmet_pr_set_ctrl_to_abort(req, &hostid); + nvmet_pr_unregister_one(pr, reg); + nvmet_pr_registration_preempted(pr, &hostid); + } + } +} + +static void nvmet_pr_unreg_all_others(struct nvmet_req *req, + uuid_t *send_hostid, + bool abort) +{ + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + uuid_t hostid; + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + if (!uuid_equal(®->hostid, send_hostid)) { + uuid_copy(&hostid, ®->hostid); + if (abort) + nvmet_pr_set_ctrl_to_abort(req, &hostid); + nvmet_pr_unregister_one(pr, reg); + nvmet_pr_registration_preempted(pr, &hostid); + } + } +} + +static void nvmet_pr_update_holder_rtype(struct nvmet_pr_registrant *reg, + void *attr) +{ + u8 new_rtype = 
*(u8 *)attr;
+
+ reg->rtype = new_rtype;
+}
+
+static u16 nvmet_pr_preempt(struct nvmet_req *req,
+ struct nvmet_pr_registrant *reg,
+ u8 rtype,
+ struct nvmet_pr_acquire_data *d,
+ bool abort)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_pr *pr = &req->ns->pr;
+ struct nvmet_pr_registrant *holder;
+ enum nvme_pr_type original_rtype;
+ u64 prkey = le64_to_cpu(d->prkey);
+ u16 status;
+
+ holder = rcu_dereference_protected(pr->holder, 1);
+ if (!holder)
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+
+ original_rtype = holder->rtype;
+ if (original_rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS ||
+ original_rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) {
+ if (!prkey) {
+ /*
+ * To prevent possible access from other hosts, and to
+ * avoid terminating the holder, set the new holder
+ * first before unregistering.
+ */
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ nvmet_pr_unreg_all_others(req, &ctrl->hostid, abort);
+ return NVME_SC_SUCCESS;
+ }
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+ }
+
+ if (holder == reg) {
+ status = nvmet_pr_update_reg_attr(pr, holder,
+ nvmet_pr_update_holder_rtype, &rtype);
+ if (!status && original_rtype != rtype)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ return status;
+ }
+
+ if (prkey == holder->rkey) {
+ /*
+ * Same as before, set the new holder first.
+ */
+ nvmet_pr_set_new_holder(pr, rtype, reg);
+ nvmet_pr_unreg_all_others_by_prkey(req, prkey, &ctrl->hostid,
+ abort);
+ if (original_rtype != rtype)
+ nvmet_pr_resv_released(pr, &reg->hostid);
+ return NVME_SC_SUCCESS;
+ }
+
+ if (prkey)
+ return nvmet_pr_unreg_all_host_by_prkey(req, prkey,
+ &ctrl->hostid, abort);
+ return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
+}
+
+static void nvmet_pr_do_abort(struct work_struct *w)
+{
+ struct nvmet_req *req = container_of(w, struct nvmet_req, r.abort_work);
+ struct nvmet_pr_per_ctrl_ref *pc_ref;
+ struct nvmet_ns *ns = req->ns;
+ unsigned long idx;
+
+ /*
+ * The target does not support aborting commands, so just wait for the
+ * per-controller reference count to drop to zero.
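+ *
+ * The per-controller ref of each preempted host was killed in
+ * nvmet_pr_set_ctrl_to_abort(), so waiting for free_done below means
+ * waiting for all of that host's outstanding commands on this
+ * namespace to complete. The ref is then resurrected so that the
+ * controller can keep issuing commands afterwards.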
+ */ + xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) { + if (percpu_ref_is_dying(&pc_ref->ref)) { + wait_for_completion(&pc_ref->free_done); + reinit_completion(&pc_ref->confirm_done); + reinit_completion(&pc_ref->free_done); + percpu_ref_resurrect(&pc_ref->ref); + } + } + + up(&ns->pr.pr_sem); + nvmet_req_complete(req, NVME_SC_SUCCESS); +} + +static u16 __nvmet_execute_pr_acquire(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 acquire_act, + u8 rtype, + struct nvmet_pr_acquire_data *d) +{ + u16 status; + + switch (acquire_act) { + case NVME_PR_ACQUIRE_ACT_ACQUIRE: + status = nvmet_pr_acquire(req, reg, rtype); + goto out; + case NVME_PR_ACQUIRE_ACT_PREEMPT: + status = nvmet_pr_preempt(req, reg, rtype, d, false); + goto inc_gen; + case NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT: + status = nvmet_pr_preempt(req, reg, rtype, d, true); + goto inc_gen; + default: + req->error_loc = offsetof(struct nvme_common_command, cdw10); + status = NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + goto out; + } +inc_gen: + if (!status) + atomic_inc(&req->ns->pr.generation); +out: + return status; +} + +static void nvmet_execute_pr_acquire(struct nvmet_req *req) +{ + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + bool ignore_key = nvmet_pr_parse_ignore_key(cdw10); + /* Reservation type, bit 15:08 */ + u8 rtype = (u8)((cdw10 >> 8) & 0xff); + /* Reservation acquire action, bit 02:00 */ + u8 acquire_act = cdw10 & 0x07; + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr_acquire_data *d = NULL; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *reg; + u16 status = NVME_SC_SUCCESS; + + if (ignore_key || + rtype < NVME_PR_WRITE_EXCLUSIVE || + rtype > NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto out; + } + + status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if (status) + goto free_data; + + status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid) && + reg->rkey == le64_to_cpu(d->crkey)) { + status = __nvmet_execute_pr_acquire(req, reg, + acquire_act, rtype, d); + break; + } + } + + if (!status && acquire_act == NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT) { + kfree(d); + INIT_WORK(&req->r.abort_work, nvmet_pr_do_abort); + queue_work(nvmet_wq, &req->r.abort_work); + return; + } + + up(&pr->pr_sem); + +free_data: + kfree(d); +out: + nvmet_req_complete(req, status); +} + +static u16 nvmet_pr_release(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 rtype) +{ + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *holder; + u8 original_rtype; + + holder = rcu_dereference_protected(pr->holder, 1); + if (!holder || reg != holder) + return NVME_SC_SUCCESS; + + original_rtype = holder->rtype; + if (original_rtype != rtype) + return NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + + rcu_assign_pointer(pr->holder, NULL); + + if (original_rtype != NVME_PR_WRITE_EXCLUSIVE && + original_rtype != NVME_PR_EXCLUSIVE_ACCESS) + nvmet_pr_resv_released(pr, ®->hostid); + + return NVME_SC_SUCCESS; +} + +static void nvmet_pr_clear(struct nvmet_req *req) +{ + struct nvmet_pr_registrant *reg, *tmp; + struct nvmet_pr *pr = &req->ns->pr; + + rcu_assign_pointer(pr->holder, NULL); + + list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) { + list_del_rcu(®->entry); + if 
(!uuid_equal(&req->sq->ctrl->hostid, ®->hostid)) + nvmet_pr_resv_preempted(pr, ®->hostid); + kfree_rcu(reg, rcu); + } + + atomic_inc(&pr->generation); +} + +static u16 __nvmet_execute_pr_release(struct nvmet_req *req, + struct nvmet_pr_registrant *reg, + u8 release_act, u8 rtype) +{ + switch (release_act) { + case NVME_PR_RELEASE_ACT_RELEASE: + return nvmet_pr_release(req, reg, rtype); + case NVME_PR_RELEASE_ACT_CLEAR: + nvmet_pr_clear(req); + return NVME_SC_SUCCESS; + default: + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_OPCODE | NVME_STATUS_DNR; + } +} + +static void nvmet_execute_pr_release(struct nvmet_req *req) +{ + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + bool ignore_key = nvmet_pr_parse_ignore_key(cdw10); + u8 rtype = (u8)((cdw10 >> 8) & 0xff); /* Reservation type, bit 15:08 */ + u8 release_act = cdw10 & 0x07; /* Reservation release action, bit 02:00 */ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_release_data *d; + struct nvmet_pr_registrant *reg; + u16 status; + + if (ignore_key) { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) { + status = NVME_SC_INTERNAL; + goto out; + } + + status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if (status) + goto free_data; + + status = NVME_SC_RESERVATION_CONFLICT | NVME_STATUS_DNR; + down(&pr->pr_sem); + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + if (uuid_equal(®->hostid, &ctrl->hostid) && + reg->rkey == le64_to_cpu(d->crkey)) { + status = __nvmet_execute_pr_release(req, reg, + release_act, rtype); + break; + } + } + up(&pr->pr_sem); +free_data: + kfree(d); +out: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_pr_report(struct nvmet_req *req) +{ + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); + u32 num_bytes = 4 * (cdw10 + 1); /* cdw10 is number of dwords */ + u8 eds = cdw11 & 1; /* Extended data structure, bit 00 */ + struct nvme_registered_ctrl_ext *ctrl_eds; + struct nvme_reservation_status_ext *data; + struct nvmet_pr *pr = &req->ns->pr; + struct nvmet_pr_registrant *holder; + struct nvmet_pr_registrant *reg; + u16 num_ctrls = 0; + u16 status; + u8 rtype; + + /* nvmet hostid(uuid_t) is 128 bit. */ + if (!eds) { + req->error_loc = offsetof(struct nvme_common_command, cdw11); + status = NVME_SC_HOST_ID_INCONSIST | NVME_STATUS_DNR; + goto out; + } + + if (num_bytes < sizeof(struct nvme_reservation_status_ext)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + + data = kzalloc(num_bytes, GFP_KERNEL); + if (!data) { + status = NVME_SC_INTERNAL; + goto out; + } + data->gen = cpu_to_le32(atomic_read(&pr->generation)); + data->ptpls = 0; + ctrl_eds = data->regctl_eds; + + rcu_read_lock(); + holder = rcu_dereference(pr->holder); + rtype = holder ? holder->rtype : 0; + data->rtype = rtype; + + list_for_each_entry_rcu(reg, &pr->registrant_list, entry) { + num_ctrls++; + /* + * continue to get the number of all registrans. + */ + if (((void *)ctrl_eds + sizeof(*ctrl_eds)) > + ((void *)data + num_bytes)) + continue; + /* + * Dynamic controller, set cntlid to 0xffff. 
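+ *
+ * Since nvmet controllers are dynamic, report NVME_CNTLID_DYNAMIC
+ * instead of a real controller ID, and set RCSTS for a registrant
+ * that holds the reservation (which is every registrant for the
+ * all-registrants reservation types).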
+ */ + ctrl_eds->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC); + if (rtype == NVME_PR_WRITE_EXCLUSIVE_ALL_REGS || + rtype == NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS) + ctrl_eds->rcsts = 1; + if (reg == holder) + ctrl_eds->rcsts = 1; + uuid_copy((uuid_t *)&ctrl_eds->hostid, ®->hostid); + ctrl_eds->rkey = cpu_to_le64(reg->rkey); + ctrl_eds++; + } + rcu_read_unlock(); + + put_unaligned_le16(num_ctrls, data->regctl); + status = nvmet_copy_to_sgl(req, 0, data, num_bytes); + kfree(data); +out: + nvmet_req_complete(req, status); +} + +u16 nvmet_parse_pr_cmd(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->common.opcode) { + case nvme_cmd_resv_register: + req->execute = nvmet_execute_pr_register; + break; + case nvme_cmd_resv_acquire: + req->execute = nvmet_execute_pr_acquire; + break; + case nvme_cmd_resv_release: + req->execute = nvmet_execute_pr_release; + break; + case nvme_cmd_resv_report: + req->execute = nvmet_execute_pr_report; + break; + default: + return 1; + } + return NVME_SC_SUCCESS; +} + +static bool nvmet_is_req_write_cmd_group(struct nvmet_req *req) +{ + u8 opcode = req->cmd->common.opcode; + + if (req->sq->qid) { + switch (opcode) { + case nvme_cmd_flush: + case nvme_cmd_write: + case nvme_cmd_write_zeroes: + case nvme_cmd_dsm: + case nvme_cmd_zone_append: + case nvme_cmd_zone_mgmt_send: + return true; + default: + return false; + } + } + return false; +} + +static bool nvmet_is_req_read_cmd_group(struct nvmet_req *req) +{ + u8 opcode = req->cmd->common.opcode; + + if (req->sq->qid) { + switch (opcode) { + case nvme_cmd_read: + case nvme_cmd_zone_mgmt_recv: + return true; + default: + return false; + } + } + return false; +} + +u16 nvmet_pr_check_cmd_access(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_pr_registrant *holder; + struct nvmet_ns *ns = req->ns; + struct nvmet_pr *pr = &ns->pr; + u16 status = NVME_SC_SUCCESS; + + rcu_read_lock(); + holder = rcu_dereference(pr->holder); + if (!holder) + goto unlock; + if (uuid_equal(&ctrl->hostid, &holder->hostid)) + goto unlock; + + /* + * The Reservation command group is checked in executing, + * allow it here. 
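+ *
+ * For the other command groups: Write Exclusive blocks only
+ * write-type commands from non-holders, Exclusive Access blocks both
+ * read-type and write-type commands, and the "Registrants Only" /
+ * "All Registrants" variants additionally allow any registered host.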
+
+u16 nvmet_pr_get_ns_pc_ref(struct nvmet_req *req)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+
+	pc_ref = xa_load(&req->ns->pr_per_ctrl_refs,
+			req->sq->ctrl->cntlid);
+	if (unlikely(!percpu_ref_tryget_live(&pc_ref->ref)))
+		return NVME_SC_INTERNAL;
+	req->pc_ref = pc_ref;
+	return NVME_SC_SUCCESS;
+}
+
+static void nvmet_pr_ctrl_ns_all_cmds_done(struct percpu_ref *ref)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref =
+		container_of(ref, struct nvmet_pr_per_ctrl_ref, ref);
+
+	complete(&pc_ref->free_done);
+}
+
+static int nvmet_pr_alloc_and_insert_pc_ref(struct nvmet_ns *ns,
+					    unsigned long idx,
+					    uuid_t *hostid)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	int ret;
+
+	pc_ref = kmalloc(sizeof(*pc_ref), GFP_ATOMIC);
+	if (!pc_ref)
+		return -ENOMEM;
+
+	ret = percpu_ref_init(&pc_ref->ref, nvmet_pr_ctrl_ns_all_cmds_done,
+			PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+	if (ret)
+		goto free;
+
+	init_completion(&pc_ref->free_done);
+	init_completion(&pc_ref->confirm_done);
+	uuid_copy(&pc_ref->hostid, hostid);
+
+	ret = xa_insert(&ns->pr_per_ctrl_refs, idx, pc_ref, GFP_KERNEL);
+	if (ret)
+		goto exit;
+	return ret;
+exit:
+	percpu_ref_exit(&pc_ref->ref);
+free:
+	kfree(pc_ref);
+	return ret;
+}
+
+int nvmet_ctrl_init_pr(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_subsys *subsys = ctrl->subsys;
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_ns *ns = NULL;
+	unsigned long idx;
+	int ret;
+
+	ctrl->pr_log_mgr.counter = 0;
+	ctrl->pr_log_mgr.lost_count = 0;
+	mutex_init(&ctrl->pr_log_mgr.lock);
+	INIT_KFIFO(ctrl->pr_log_mgr.log_queue);
+
+	/*
+	 * Here we are under subsys lock, if an ns is not in subsys->namespaces,
+	 * we can make sure that ns is not enabled, and not call
+	 * nvmet_pr_init_ns(), see more details in nvmet_ns_enable().
+	 * So just check ns->pr.enable.
+	 */
+	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
+		if (ns->pr.enable) {
+			ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+							&ctrl->hostid);
+			if (ret)
+				goto free_per_ctrl_refs;
+		}
+	}
+	return 0;
+
+free_per_ctrl_refs:
+	nvmet_for_each_enabled_ns(&subsys->namespaces, idx, ns) {
+		if (ns->pr.enable) {
+			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+			if (pc_ref)
+				percpu_ref_exit(&pc_ref->ref);
+			kfree(pc_ref);
+		}
+	}
+	return ret;
+}
+
+void nvmet_ctrl_destroy_pr(struct nvmet_ctrl *ctrl)
+{
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_ns *ns;
+	unsigned long idx;
+
+	kfifo_free(&ctrl->pr_log_mgr.log_queue);
+	mutex_destroy(&ctrl->pr_log_mgr.lock);
+
+	nvmet_for_each_enabled_ns(&ctrl->subsys->namespaces, idx, ns) {
+		if (ns->pr.enable) {
+			pc_ref = xa_erase(&ns->pr_per_ctrl_refs, ctrl->cntlid);
+			if (pc_ref)
+				percpu_ref_exit(&pc_ref->ref);
+			kfree(pc_ref);
+		}
+	}
+}
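Each controller/namespace pair gets a percpu_ref that the I/O path takes through percpu_ref_tryget_live() (nvmet_pr_get_ns_pc_ref() above) and that is initialised with PERCPU_REF_ALLOW_REINIT plus two completions. The hunks that actually drive confirm_done and free_done live elsewhere in the patch, so the fragment below is only a generic sketch of how such a reference is typically drained and rearmed; the pc_ref_sketch context and function names are hypothetical.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

/* Hypothetical context wrapping one per-controller reference. */
struct pc_ref_sketch {
	struct percpu_ref ref;
	struct completion confirm_done;	/* percpu -> atomic switch observed */
	struct completion free_done;	/* last reference dropped */
};

static void pc_ref_confirm(struct percpu_ref *ref)
{
	struct pc_ref_sketch *p = container_of(ref, struct pc_ref_sketch, ref);

	complete(&p->confirm_done);
}

/*
 * Drain in-flight users: fail new tryget_live() callers, wait for the
 * mode switch, then wait until every earlier get has been put (the
 * release callback completes free_done).  PERCPU_REF_ALLOW_REINIT lets
 * percpu_ref_resurrect() arm the same ref again instead of re-initialising.
 */
static void pc_ref_quiesce_and_reuse(struct pc_ref_sketch *p)
{
	reinit_completion(&p->confirm_done);
	reinit_completion(&p->free_done);
	percpu_ref_kill_and_confirm(&p->ref, pc_ref_confirm);
	wait_for_completion(&p->confirm_done);
	wait_for_completion(&p->free_done);
	percpu_ref_resurrect(&p->ref);
}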
+
+int nvmet_pr_init_ns(struct nvmet_ns *ns)
+{
+	struct nvmet_subsys *subsys = ns->subsys;
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_ctrl *ctrl = NULL;
+	unsigned long idx;
+	int ret;
+
+	ns->pr.holder = NULL;
+	atomic_set(&ns->pr.generation, 0);
+	sema_init(&ns->pr.pr_sem, 1);
+	INIT_LIST_HEAD(&ns->pr.registrant_list);
+	ns->pr.notify_mask = 0;
+
+	xa_init(&ns->pr_per_ctrl_refs);
+
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
+		ret = nvmet_pr_alloc_and_insert_pc_ref(ns, ctrl->cntlid,
+						&ctrl->hostid);
+		if (ret)
+			goto free_per_ctrl_refs;
+	}
+	return 0;
+
+free_per_ctrl_refs:
+	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+		xa_erase(&ns->pr_per_ctrl_refs, idx);
+		percpu_ref_exit(&pc_ref->ref);
+		kfree(pc_ref);
+	}
+	return ret;
+}
+
+void nvmet_pr_exit_ns(struct nvmet_ns *ns)
+{
+	struct nvmet_pr_registrant *reg, *tmp;
+	struct nvmet_pr_per_ctrl_ref *pc_ref;
+	struct nvmet_pr *pr = &ns->pr;
+	unsigned long idx;
+
+	list_for_each_entry_safe(reg, tmp, &pr->registrant_list, entry) {
+		list_del(&reg->entry);
+		kfree(reg);
+	}
+
+	xa_for_each(&ns->pr_per_ctrl_refs, idx, pc_ref) {
+		/*
+		 * No command on ns here, we can safely free pc_ref.
+ */ + pc_ref = xa_erase(&ns->pr_per_ctrl_refs, idx); + percpu_ref_exit(&pc_ref->ref); + kfree(pc_ref); + } + + xa_destroy(&ns->pr_per_ctrl_refs); +} diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 689bb5d3cfdc..9c12b2361a6d 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -16,7 +16,7 @@ #include <linux/string.h> #include <linux/wait.h> #include <linux/inet.h> -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <rdma/ib_verbs.h> #include <rdma/rdma_cm.h> @@ -39,6 +39,8 @@ #define NVMET_RDMA_BACKLOG 128 +#define NVMET_RDMA_DISCRETE_RSP_TAG -1 + struct nvmet_rdma_srq; struct nvmet_rdma_cmd { @@ -75,7 +77,7 @@ struct nvmet_rdma_rsp { u32 invalidate_rkey; struct list_head wait_list; - struct list_head free_list; + int tag; }; enum nvmet_rdma_queue_state { @@ -98,8 +100,7 @@ struct nvmet_rdma_queue { struct nvmet_sq nvme_sq; struct nvmet_rdma_rsp *rsps; - struct list_head free_rsps; - spinlock_t rsps_lock; + struct sbitmap rsp_tags; struct nvmet_rdma_cmd *cmds; struct work_struct release_work; @@ -172,7 +173,8 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev, struct nvmet_rdma_rsp *r); static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, - struct nvmet_rdma_rsp *r); + struct nvmet_rdma_rsp *r, + int tag); static const struct nvmet_fabrics_ops nvmet_rdma_ops; @@ -210,15 +212,12 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp) static inline struct nvmet_rdma_rsp * nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) { - struct nvmet_rdma_rsp *rsp; - unsigned long flags; + struct nvmet_rdma_rsp *rsp = NULL; + int tag; - spin_lock_irqsave(&queue->rsps_lock, flags); - rsp = list_first_entry_or_null(&queue->free_rsps, - struct nvmet_rdma_rsp, free_list); - if (likely(rsp)) - list_del(&rsp->free_list); - spin_unlock_irqrestore(&queue->rsps_lock, flags); + tag = sbitmap_get(&queue->rsp_tags); + if (tag >= 0) + rsp = &queue->rsps[tag]; if (unlikely(!rsp)) { int ret; @@ -226,13 +225,12 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) rsp = kzalloc(sizeof(*rsp), GFP_KERNEL); if (unlikely(!rsp)) return NULL; - ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, + NVMET_RDMA_DISCRETE_RSP_TAG); if (unlikely(ret)) { kfree(rsp); return NULL; } - - rsp->allocated = true; } return rsp; @@ -241,17 +239,13 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) static inline void nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) { - unsigned long flags; - - if (unlikely(rsp->allocated)) { + if (unlikely(rsp->tag == NVMET_RDMA_DISCRETE_RSP_TAG)) { nvmet_rdma_free_rsp(rsp->queue->dev, rsp); kfree(rsp); return; } - spin_lock_irqsave(&rsp->queue->rsps_lock, flags); - list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); - spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); + sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag); } static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev, @@ -373,7 +367,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *cmds; int ret = -EINVAL, i; - cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); + cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL); if (!cmds) goto out; @@ -388,7 +382,7 @@ nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev, out_free: while (--i >= 0) nvmet_rdma_free_cmd(ndev, cmds + i, admin); - kfree(cmds); + kvfree(cmds); out: return ERR_PTR(ret); } @@ 
-400,11 +394,11 @@ static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev, for (i = 0; i < nr_cmds; i++) nvmet_rdma_free_cmd(ndev, cmds + i, admin); - kfree(cmds); + kvfree(cmds); } static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, - struct nvmet_rdma_rsp *r) + struct nvmet_rdma_rsp *r, int tag) { /* NVMe CQE / RDMA SEND */ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL); @@ -432,6 +426,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev, r->read_cqe.done = nvmet_rdma_read_data_done; /* Data Out / RDMA WRITE */ r->write_cqe.done = nvmet_rdma_write_data_done; + r->tag = tag; return 0; @@ -454,21 +449,23 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) { struct nvmet_rdma_device *ndev = queue->dev; int nr_rsps = queue->recv_queue_size * 2; - int ret = -EINVAL, i; + int ret = -ENOMEM, i; - queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), + if (sbitmap_init_node(&queue->rsp_tags, nr_rsps, -1, GFP_KERNEL, + NUMA_NO_NODE, false, true)) + goto out; + + queue->rsps = kvcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), GFP_KERNEL); if (!queue->rsps) - goto out; + goto out_free_sbitmap; for (i = 0; i < nr_rsps; i++) { struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; - ret = nvmet_rdma_alloc_rsp(ndev, rsp); + ret = nvmet_rdma_alloc_rsp(ndev, rsp, i); if (ret) goto out_free; - - list_add_tail(&rsp->free_list, &queue->free_rsps); } return 0; @@ -476,7 +473,9 @@ nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) out_free: while (--i >= 0) nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); - kfree(queue->rsps); + kvfree(queue->rsps); +out_free_sbitmap: + sbitmap_free(&queue->rsp_tags); out: return ret; } @@ -488,7 +487,8 @@ static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) for (i = 0; i < nr_rsps; i++) nvmet_rdma_free_rsp(ndev, &queue->rsps[i]); - kfree(queue->rsps); + kvfree(queue->rsps); + sbitmap_free(&queue->rsp_tags); } static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, @@ -578,8 +578,8 @@ static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi, if (control & NVME_RW_PRINFO_PRCHK_REF) domain->sig.dif.ref_remap = true; - domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); - domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); + domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.lbat); + domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.lbatm); domain->sig.dif.app_escape = true; if (pi_type == NVME_NS_DPS_PI_TYPE3) domain->sig.dif.ref_escape = true; @@ -852,12 +852,12 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) if (!nvme_is_write(rsp->req.cmd)) { rsp->req.error_loc = offsetof(struct nvme_common_command, opcode); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (off + len > rsp->queue->dev->inline_data_size) { pr_err("invalid inline data offset!\n"); - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; } /* no data command? 
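The rdma.c changes above replace the spinlock-protected free_rsps list with an sbitmap: a response's array index doubles as its tag, lookup is a plain index into queue->rsps, and allocation and free become lockless bit operations. A condensed sketch of that pattern, with made-up pool and entry names (the sbitmap calls themselves are the real API, used with the same arguments as in the diff):

#include <linux/sbitmap.h>
#include <linux/slab.h>

struct entry {
	int tag;	/* index in the pool, or -1 for a discrete allocation */
};

struct tag_pool {
	struct sbitmap	tags;
	struct entry	*entries;	/* entries[i] owns tag i */
};

static int tag_pool_init(struct tag_pool *p, unsigned int depth)
{
	if (sbitmap_init_node(&p->tags, depth, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;
	p->entries = kvcalloc(depth, sizeof(*p->entries), GFP_KERNEL);
	if (!p->entries) {
		sbitmap_free(&p->tags);
		return -ENOMEM;
	}
	for (unsigned int i = 0; i < depth; i++)
		p->entries[i].tag = i;
	return 0;
}

static struct entry *tag_pool_get(struct tag_pool *p)
{
	int tag = sbitmap_get(&p->tags);	/* -1 when exhausted */

	return tag >= 0 ? &p->entries[tag] : NULL;
}

static void tag_pool_put(struct tag_pool *p, struct entry *e)
{
	sbitmap_clear_bit(&p->tags, e->tag);
}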
*/ @@ -919,7 +919,7 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } case NVME_KEY_SGL_FMT_DATA_DESC: switch (sgl->type & 0xf) { @@ -931,12 +931,12 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp) pr_err("invalid SGL subtype: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } default: pr_err("invalid SGL type: %#x\n", sgl->type); rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); - return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; } } @@ -976,8 +976,7 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, cmd->send_sge.addr, cmd->send_sge.length, DMA_TO_DEVICE); - if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, - &queue->nvme_sq, &nvmet_rdma_ops)) + if (!nvmet_req_init(&cmd->req, &queue->nvme_sq, &nvmet_rdma_ops)) return; status = nvmet_rdma_map_sgl(cmd); @@ -996,6 +995,27 @@ out_err: nvmet_req_complete(&cmd->req, status); } +static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue, + struct nvmet_rdma_rsp *rsp) +{ + unsigned long flags; + bool ret = true; + + spin_lock_irqsave(&queue->state_lock, flags); + /* + * recheck queue state is not live to prevent a race condition + * with RDMA_CM_EVENT_ESTABLISHED handler. + */ + if (queue->state == NVMET_RDMA_Q_LIVE) + ret = false; + else if (queue->state == NVMET_RDMA_Q_CONNECTING) + list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); + else + nvmet_rdma_put_rsp(rsp); + spin_unlock_irqrestore(&queue->state_lock, flags); + return ret; +} + static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_cmd *cmd = @@ -1038,17 +1058,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) rsp->n_rdma = 0; rsp->invalidate_rkey = 0; - if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { - unsigned long flags; - - spin_lock_irqsave(&queue->state_lock, flags); - if (queue->state == NVMET_RDMA_Q_CONNECTING) - list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); - else - nvmet_rdma_put_rsp(rsp); - spin_unlock_irqrestore(&queue->state_lock, flags); + if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) && + nvmet_rdma_recv_not_live(queue, rsp)) return; - } nvmet_rdma_handle_command(queue, rsp); } @@ -1340,6 +1352,7 @@ static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) pr_debug("freeing queue %d\n", queue->idx); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); nvmet_rdma_destroy_queue_ib(queue); if (!queue->nsrq) { @@ -1423,7 +1436,8 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, goto out_reject; } - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) { ret = NVME_RDMA_CM_NO_RSC; goto out_free_queue; @@ -1447,8 +1461,6 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, INIT_LIST_HEAD(&queue->rsp_wait_list); INIT_LIST_HEAD(&queue->rsp_wr_wait_list); spin_lock_init(&queue->rsp_wr_wait_lock); - INIT_LIST_HEAD(&queue->free_rsps); - spin_lock_init(&queue->rsps_lock); INIT_LIST_HEAD(&queue->queue_list); queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); @@ -1506,6 +1518,7 @@ out_ida_remove: out_destroy_sq: 
nvmet_sq_destroy(&queue->nvme_sq); out_free_queue: + nvmet_cq_put(&queue->nvme_cq); kfree(queue); out_reject: nvmet_rdma_cm_reject(cm_id, ret); @@ -1718,7 +1731,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, * We registered an ib_client to handle device removal for queues, * so we only need to handle the listening port cm_ids. In this case * we nullify the priv to prevent double cm_id destruction and destroying - * the cm_id implicitely by returning a non-zero rc to the callout. + * the cm_id implicitly by returning a non-zero rc to the callout. */ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, struct nvmet_rdma_queue *queue) @@ -1729,7 +1742,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, /* * This is a queue cm_id. we have registered * an ib_client to handle queues removal - * so don't interfear and just return. + * so don't interfere and just return. */ return 0; } @@ -1747,7 +1760,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, /* * We need to return 1 so that the core will destroy - * it's own ID. What a great API design.. + * its own ID. What a great API design.. */ return 1; } @@ -1988,7 +2001,7 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, struct nvmet_rdma_port *port = nport->priv; struct rdma_cm_id *cm_id = port->cm_id; - if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { + if (inet_addr_is_any(&cm_id->route.addr.src_addr)) { struct nvmet_rdma_rsp *rsp = container_of(req, struct nvmet_rdma_rsp, req); struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; @@ -2000,6 +2013,17 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, } } +static ssize_t nvmet_rdma_host_port_addr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len) +{ + struct nvmet_sq *nvme_sq = ctrl->sqs[0]; + struct nvmet_rdma_queue *queue = + container_of(nvme_sq, struct nvmet_rdma_queue, nvme_sq); + + return snprintf(traddr, traddr_len, "%pISc", + (struct sockaddr *)&queue->cm_id->route.addr.dst_addr); +} + static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) { if (ctrl->pi_support) @@ -2024,6 +2048,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .queue_response = nvmet_rdma_queue_response, .delete_ctrl = nvmet_rdma_delete_ctrl, .disc_traddr = nvmet_rdma_disc_port_addr, + .host_traddr = nvmet_rdma_host_port_addr, .get_mdts = nvmet_rdma_get_mdts, .get_max_queue_size = nvmet_rdma_get_max_queue_size, }; diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 380f22ee3ebb..15416ff0eac4 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -7,8 +7,8 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/crc32c.h> #include <linux/err.h> -#include <linux/key.h> #include <linux/nvme-tcp.h> #include <linux/nvme-keyring.h> #include <net/sock.h> @@ -18,7 +18,6 @@ #include <net/handshake.h> #include <linux/inet.h> #include <linux/llist.h> -#include <crypto/hash.h> #include <trace/events/sock.h> #include "nvmet.h" @@ -173,8 +172,6 @@ struct nvmet_tcp_queue { /* digest state */ bool hdr_digest; bool data_digest; - struct ahash_request *snd_hash; - struct ahash_request *rcv_hash; /* TLS state */ key_serial_t tls_pskid; @@ -295,14 +292,9 @@ static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue) return queue->data_digest ? 
NVME_TCP_DIGEST_LENGTH : 0; } -static inline void nvmet_tcp_hdgst(struct ahash_request *hash, - void *pdu, size_t len) +static inline void nvmet_tcp_hdgst(void *pdu, size_t len) { - struct scatterlist sg; - - sg_init_one(&sg, pdu, len); - ahash_request_set_crypt(hash, &sg, pdu + len, len); - crypto_ahash_digest(hash); + put_unaligned_le32(~crc32c(~0, pdu, len), pdu + len); } static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, @@ -319,7 +311,7 @@ static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue, } recv_digest = *(__le32 *)(pdu + hdr->hlen); - nvmet_tcp_hdgst(queue->rcv_hash, pdu, len); + nvmet_tcp_hdgst(pdu, len); exp_digest = *(__le32 *)(pdu + hdr->hlen); if (recv_digest != exp_digest) { pr_err("queue %d: header digest error: recv %#x expected %#x\n", @@ -416,10 +408,10 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd) if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET)) { if (!nvme_is_write(cmd->req.cmd)) - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; if (len > cmd->req.port->inline_data_size) - return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; + return NVME_SC_SGL_INVALID_OFFSET | NVME_STATUS_DNR; cmd->pdu_len = len; } cmd->req.transfer_len += len; @@ -442,12 +434,24 @@ err: return NVME_SC_INTERNAL; } -static void nvmet_tcp_calc_ddgst(struct ahash_request *hash, - struct nvmet_tcp_cmd *cmd) +static void nvmet_tcp_calc_ddgst(struct nvmet_tcp_cmd *cmd) { - ahash_request_set_crypt(hash, cmd->req.sg, - (void *)&cmd->exp_ddgst, cmd->req.transfer_len); - crypto_ahash_digest(hash); + size_t total_len = cmd->req.transfer_len; + struct scatterlist *sg = cmd->req.sg; + u32 crc = ~0; + + while (total_len) { + size_t len = min_t(size_t, total_len, sg->length); + + /* + * Note that the scatterlist does not contain any highmem pages, + * as it was allocated by sgl_alloc() with GFP_KERNEL. 
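With the ahash plumbing gone, both NVMe/TCP digests are plain CRC32C computations: the header digest is the CRC of the PDU header appended in little-endian form, and the data digest chains the CRC across scatterlist segments, which works because crc32c() takes the running value as its seed. A compact restatement of both helpers over plain buffers (the seg struct and function names here are illustrative, not the driver's):

#include <linux/crc32c.h>
#include <linux/types.h>
#include <linux/unaligned.h>

/* Header digest: 4 bytes appended directly after the PDU header. */
static void pdu_hdgst(void *pdu, size_t hlen)
{
	put_unaligned_le32(~crc32c(~0, pdu, hlen), pdu + hlen);
}

/* Data digest: chain the CRC across discontiguous payload segments. */
struct seg {
	const void *buf;
	size_t len;
};

static __le32 payload_ddgst(const struct seg *segs, int nr_segs)
{
	u32 crc = ~0;

	for (int i = 0; i < nr_segs; i++)
		crc = crc32c(crc, segs[i].buf, segs[i].len);
	return cpu_to_le32(~crc);
}

Chaining the seed this way yields the same value as one crc32c() call over the concatenated payload, which is why the per-segment loop over the scatterlist is equivalent to the old one-shot ahash digest.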
+ */ + crc = crc32c(crc, sg_virt(sg), len); + total_len -= len; + sg = sg_next(sg); + } + cmd->exp_ddgst = cpu_to_le32(~crc); } static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) @@ -474,19 +478,18 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd) if (queue->data_digest) { pdu->hdr.flags |= NVME_TCP_F_DDGST; - nvmet_tcp_calc_ddgst(queue->snd_hash, cmd); + nvmet_tcp_calc_ddgst(cmd); } if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu; - struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; @@ -504,14 +507,13 @@ static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd) pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) { struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu; - struct nvmet_tcp_queue *queue = cmd->queue; u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue); cmd->offset = 0; @@ -524,7 +526,7 @@ static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd) pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); if (cmd->queue->hdr_digest) { pdu->hdr.flags |= NVME_TCP_F_HDGST; - nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); + nvmet_tcp_hdgst(pdu, sizeof(*pdu)); } } @@ -571,10 +573,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; + enum nvmet_tcp_recv_state queue_state; + struct nvmet_tcp_cmd *queue_cmd; struct nvme_sgl_desc *sgl; u32 len; - if (unlikely(cmd == queue->cmd)) { + /* Pairs with store_release in nvmet_prepare_receive_pdu() */ + queue_state = smp_load_acquire(&queue->rcv_state); + queue_cmd = READ_ONCE(queue->cmd); + + if (unlikely(cmd == queue_cmd)) { sgl = &cmd->req.cmd->common.dptr.sgl; len = le32_to_cpu(sgl->length); @@ -583,7 +591,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) * Avoid using helpers, this might happen before * nvmet_req_init is completed. 
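The queue_response/prepare_receive_pdu pairing here is a publication-ordering problem: the receive path must make queue->cmd visible before it advertises NVMET_TCP_RECV_PDU, and the response path must load the state with acquire semantics before it trusts queue->cmd (the matching smp_store_release() appears in nvmet_prepare_receive_pdu() below). The minimal pattern, using the same kernel primitives; the struct and state names are placeholders:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct cmd;

enum rcv_state { RECV_ERR, RECV_PDU };

struct rx_queue {
	struct cmd *cmd;
	enum rcv_state rcv_state;
};

/* Receive side: publish cmd first, then the state that makes it meaningful. */
static void rx_prepare(struct rx_queue *q)
{
	WRITE_ONCE(q->cmd, NULL);
	/* Everything written above is visible before rcv_state reads RECV_PDU. */
	smp_store_release(&q->rcv_state, RECV_PDU);
}

/* Response side: the acquire load orders the subsequent read of q->cmd. */
static bool rx_sees_inline_window(struct rx_queue *q, struct cmd *c)
{
	enum rcv_state state = smp_load_acquire(&q->rcv_state);
	struct cmd *cur = READ_ONCE(q->cmd);

	return state == RECV_PDU && cur == c;
}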
*/ - if (queue->rcv_state == NVMET_TCP_RECV_PDU && + if (queue_state == NVMET_TCP_RECV_PDU && len && len <= cmd->req.port->inline_data_size && nvme_is_write(cmd->req.cmd)) return; @@ -847,46 +855,11 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) { queue->offset = 0; queue->left = sizeof(struct nvme_tcp_hdr); - queue->cmd = NULL; - queue->rcv_state = NVMET_TCP_RECV_PDU; -} - -static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); - - ahash_request_free(queue->rcv_hash); - ahash_request_free(queue->snd_hash); - crypto_free_ahash(tfm); -} - -static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue) -{ - struct crypto_ahash *tfm; - - tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(tfm)) - return PTR_ERR(tfm); - - queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->snd_hash) - goto free_tfm; - ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); - - queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); - if (!queue->rcv_hash) - goto free_snd_hash; - ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); - - return 0; -free_snd_hash: - ahash_request_free(queue->snd_hash); -free_tfm: - crypto_free_ahash(tfm); - return -ENOMEM; + WRITE_ONCE(queue->cmd, NULL); + /* Ensure rcv_state is visible only after queue->cmd is set */ + smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); } - static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) { struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq; @@ -915,11 +888,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue) queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE); queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE); - if (queue->hdr_digest || queue->data_digest) { - ret = nvmet_tcp_alloc_crypto(queue); - if (ret) - return ret; - } memset(icresp, 0, sizeof(*icresp)); icresp->hdr.type = nvme_tcp_icresp; @@ -1071,12 +1039,12 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue) req = &queue->cmd->req; memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd)); - if (unlikely(!nvmet_req_init(req, &queue->nvme_cq, - &queue->nvme_sq, &nvmet_tcp_ops))) { - pr_err("failed cmd %p id %d opcode %d, data_len: %d\n", + if (unlikely(!nvmet_req_init(req, &queue->nvme_sq, &nvmet_tcp_ops))) { + pr_err("failed cmd %p id %d opcode %d, data_len: %d, status: %04x\n", req->cmd, req->cmd->common.command_id, req->cmd->common.opcode, - le32_to_cpu(req->cmd->common.dptr.sgl.length)); + le32_to_cpu(req->cmd->common.dptr.sgl.length), + le16_to_cpu(req->cqe->status)); nvmet_tcp_handle_req_failure(queue, queue->cmd, req); return 0; @@ -1240,7 +1208,7 @@ static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd) { struct nvmet_tcp_queue *queue = cmd->queue; - nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd); + nvmet_tcp_calc_ddgst(cmd); queue->offset = 0; queue->left = NVME_TCP_DIGEST_LENGTH; queue->rcv_state = NVMET_TCP_RECV_DDGST; @@ -1516,7 +1484,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) struct nvmet_tcp_cmd *cmds; int i, ret = -EINVAL, nr_cmds = queue->nr_cmds; - cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); + cmds = kvcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL); if (!cmds) goto out; @@ -1532,7 +1500,7 @@ static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue) out_free: while (--i >= 0) nvmet_tcp_free_cmd(cmds + i); - kfree(cmds); + kvfree(cmds); out: return ret; } @@ -1546,13 
+1514,16 @@ static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue) nvmet_tcp_free_cmd(cmds + i); nvmet_tcp_free_cmd(&queue->connect); - kfree(cmds); + kvfree(cmds); } static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; + if (!queue->state_change) + return; + write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; @@ -1602,15 +1573,15 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w) /* stop accepting incoming data */ queue->rcv_state = NVMET_TCP_RECV_ERR; + nvmet_sq_put_tls_key(&queue->nvme_sq); nvmet_tcp_uninit_data_in_cmds(queue); nvmet_sq_destroy(&queue->nvme_sq); + nvmet_cq_put(&queue->nvme_cq); cancel_work_sync(&queue->io_work); nvmet_tcp_free_cmd_data_in_buffers(queue); /* ->sock will be released by fput() */ fput(queue->sock->file); nvmet_tcp_free_cmds(queue); - if (queue->hdr_digest || queue->data_digest) - nvmet_tcp_free_crypto(queue); ida_free(&nvmet_tcp_queue_ida, queue->idx); page_frag_cache_drain(&queue->pf_cache); kfree(queue); @@ -1787,6 +1758,27 @@ static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue) return 0; } +static int nvmet_tcp_tls_key_lookup(struct nvmet_tcp_queue *queue, + key_serial_t peerid) +{ + struct key *tls_key = nvme_tls_key_lookup(peerid); + int status = 0; + + if (IS_ERR(tls_key)) { + pr_warn("%s: queue %d failed to lookup key %x\n", + __func__, queue->idx, peerid); + spin_lock_bh(&queue->state_lock); + queue->state = NVMET_TCP_Q_FAILED; + spin_unlock_bh(&queue->state_lock); + status = PTR_ERR(tls_key); + } else { + pr_debug("%s: queue %d using TLS PSK %x\n", + __func__, queue->idx, peerid); + queue->nvme_sq.tls_key = tls_key; + } + return status; +} + static void nvmet_tcp_tls_handshake_done(void *data, int status, key_serial_t peerid) { @@ -1807,6 +1799,10 @@ static void nvmet_tcp_tls_handshake_done(void *data, int status, spin_unlock_bh(&queue->state_lock); cancel_delayed_work_sync(&queue->tls_handshake_tmo_work); + + if (!status) + status = nvmet_tcp_tls_key_lookup(queue, peerid); + if (status) nvmet_tcp_schedule_release_queue(queue); else @@ -1914,7 +1910,8 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, if (ret) goto out_ida_remove; - ret = nvmet_sq_init(&queue->nvme_sq); + nvmet_cq_init(&queue->nvme_cq); + ret = nvmet_sq_init(&queue->nvme_sq, &queue->nvme_cq); if (ret) goto out_free_connect; @@ -1931,10 +1928,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, struct sock *sk = queue->sock->sk; /* Restore the default callbacks before starting upcall */ - read_lock_bh(&sk->sk_callback_lock); + write_lock_bh(&sk->sk_callback_lock); sk->sk_user_data = NULL; sk->sk_data_ready = port->data_ready; - read_unlock_bh(&sk->sk_callback_lock); + write_unlock_bh(&sk->sk_callback_lock); if (!nvmet_tcp_try_peek_pdu(queue)) { if (!nvmet_tcp_tls_handshake(queue)) return; @@ -1957,6 +1954,7 @@ out_destroy_sq: mutex_unlock(&nvmet_tcp_queue_mutex); nvmet_sq_destroy(&queue->nvme_sq); out_free_connect: + nvmet_cq_put(&queue->nvme_cq); nvmet_tcp_free_cmd(&queue->connect); out_ida_remove: ida_free(&nvmet_tcp_queue_ida, queue->idx); @@ -2057,7 +2055,7 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport) if (so_priority > 0) sock_set_priority(port->sock->sk, so_priority); - ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr, + ret = kernel_bind(port->sock, (struct sockaddr_unsized *)&port->addr, sizeof(port->addr)); if (ret) { pr_err("failed to 
bind port socket %d\n", ret); @@ -2146,8 +2144,10 @@ static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq) } queue->nr_cmds = sq->size * 2; - if (nvmet_tcp_alloc_cmds(queue)) + if (nvmet_tcp_alloc_cmds(queue)) { + queue->nr_cmds = 0; return NVME_SC_INTERNAL; + } return 0; } @@ -2156,7 +2156,7 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, { struct nvmet_tcp_port *port = nport->priv; - if (inet_addr_is_any((struct sockaddr *)&port->addr)) { + if (inet_addr_is_any(&port->addr)) { struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; @@ -2167,6 +2167,19 @@ static void nvmet_tcp_disc_port_addr(struct nvmet_req *req, } } +static ssize_t nvmet_tcp_host_port_addr(struct nvmet_ctrl *ctrl, + char *traddr, size_t traddr_len) +{ + struct nvmet_sq *sq = ctrl->sqs[0]; + struct nvmet_tcp_queue *queue = + container_of(sq, struct nvmet_tcp_queue, nvme_sq); + + if (queue->sockaddr_peer.ss_family == AF_UNSPEC) + return -EINVAL; + return snprintf(traddr, traddr_len, "%pISc", + (struct sockaddr *)&queue->sockaddr_peer); +} + static const struct nvmet_fabrics_ops nvmet_tcp_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_TCP, @@ -2177,6 +2190,7 @@ static const struct nvmet_fabrics_ops nvmet_tcp_ops = { .delete_ctrl = nvmet_tcp_delete_ctrl, .install_queue = nvmet_tcp_install_queue, .disc_traddr = nvmet_tcp_disc_port_addr, + .host_traddr = nvmet_tcp_host_port_addr, }; static int __init nvmet_tcp_init(void) diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c index 8d1806a82887..6dbc7036f2e4 100644 --- a/drivers/nvme/target/trace.c +++ b/drivers/nvme/target/trace.c @@ -4,7 +4,7 @@ * Copyright (c) 2018 Johannes Thumshirn, SUSE Linux GmbH */ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include "trace.h" static const char *nvmet_trace_admin_identify(struct trace_seq *p, u8 *cdw10) @@ -180,6 +180,106 @@ static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10) return ret; } +static const char *nvmet_trace_resv_reg(struct trace_seq *p, u8 *cdw10) +{ + static const char * const rrega_strs[] = { + [0x00] = "register", + [0x01] = "unregister", + [0x02] = "replace", + }; + const char *ret = trace_seq_buffer_ptr(p); + u8 rrega = cdw10[0] & 0x7; + u8 iekey = (cdw10[0] >> 3) & 0x1; + u8 ptpl = (cdw10[3] >> 6) & 0x3; + const char *rrega_str; + + if (rrega < ARRAY_SIZE(rrega_strs) && rrega_strs[rrega]) + rrega_str = rrega_strs[rrega]; + else + rrega_str = "reserved"; + + trace_seq_printf(p, "rrega=%u:%s, iekey=%u, ptpl=%u", + rrega, rrega_str, iekey, ptpl); + trace_seq_putc(p, 0); + + return ret; +} + +static const char * const rtype_strs[] = { + [0x00] = "reserved", + [0x01] = "write exclusive", + [0x02] = "exclusive access", + [0x03] = "write exclusive registrants only", + [0x04] = "exclusive access registrants only", + [0x05] = "write exclusive all registrants", + [0x06] = "exclusive access all registrants", +}; + +static const char *nvmet_trace_resv_acq(struct trace_seq *p, u8 *cdw10) +{ + static const char * const racqa_strs[] = { + [0x00] = "acquire", + [0x01] = "preempt", + [0x02] = "preempt and abort", + }; + const char *ret = trace_seq_buffer_ptr(p); + u8 racqa = cdw10[0] & 0x7; + u8 iekey = (cdw10[0] >> 3) & 0x1; + u8 rtype = cdw10[1]; + const char *racqa_str = "reserved"; + const char *rtype_str = "reserved"; + + if (racqa < ARRAY_SIZE(racqa_strs) && racqa_strs[racqa]) + racqa_str = racqa_strs[racqa]; + + if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype]) + rtype_str = 
rtype_strs[rtype]; + + trace_seq_printf(p, "racqa=%u:%s, iekey=%u, rtype=%u:%s", + racqa, racqa_str, iekey, rtype, rtype_str); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvmet_trace_resv_rel(struct trace_seq *p, u8 *cdw10) +{ + static const char * const rrela_strs[] = { + [0x00] = "release", + [0x01] = "clear", + }; + const char *ret = trace_seq_buffer_ptr(p); + u8 rrela = cdw10[0] & 0x7; + u8 iekey = (cdw10[0] >> 3) & 0x1; + u8 rtype = cdw10[1]; + const char *rrela_str = "reserved"; + const char *rtype_str = "reserved"; + + if (rrela < ARRAY_SIZE(rrela_strs) && rrela_strs[rrela]) + rrela_str = rrela_strs[rrela]; + + if (rtype < ARRAY_SIZE(rtype_strs) && rtype_strs[rtype]) + rtype_str = rtype_strs[rtype]; + + trace_seq_printf(p, "rrela=%u:%s, iekey=%u, rtype=%u:%s", + rrela, rrela_str, iekey, rtype, rtype_str); + trace_seq_putc(p, 0); + + return ret; +} + +static const char *nvmet_trace_resv_report(struct trace_seq *p, u8 *cdw10) +{ + const char *ret = trace_seq_buffer_ptr(p); + u32 numd = get_unaligned_le32(cdw10); + u8 eds = cdw10[4] & 0x1; + + trace_seq_printf(p, "numd=%u, eds=%u", numd, eds); + trace_seq_putc(p, 0); + + return ret; +} + const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode, u8 *cdw10) { @@ -195,6 +295,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p, return nvmet_trace_zone_mgmt_send(p, cdw10); case nvme_cmd_zone_mgmt_recv: return nvmet_trace_zone_mgmt_recv(p, cdw10); + case nvme_cmd_resv_register: + return nvmet_trace_resv_reg(p, cdw10); + case nvme_cmd_resv_acquire: + return nvmet_trace_resv_acq(p, cdw10); + case nvme_cmd_resv_release: + return nvmet_trace_resv_rel(p, cdw10); + case nvme_cmd_resv_report: + return nvmet_trace_resv_report(p, cdw10); default: return nvmet_trace_common(p, cdw10); } diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c index 0021d06041c1..15a579cf528c 100644 --- a/drivers/nvme/target/zns.c +++ b/drivers/nvme/target/zns.c @@ -100,7 +100,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { req->error_loc = offsetof(struct nvme_identify, nsid); - status = NVME_SC_INVALID_NS | NVME_SC_DNR; + status = NVME_SC_INVALID_NS | NVME_STATUS_DNR; goto out; } @@ -121,7 +121,7 @@ void nvmet_execute_identify_ns_zns(struct nvmet_req *req) } if (!bdev_is_zoned(req->ns->bdev)) { - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = offsetof(struct nvme_identify, nsid); goto out; } @@ -158,17 +158,17 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, slba); - return NVME_SC_LBA_RANGE | NVME_SC_DNR; + return NVME_SC_LBA_RANGE | NVME_STATUS_DNR; } if (out_bufsize < sizeof(struct nvme_zone_report)) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, numd); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } if (req->cmd->zmr.zra != NVME_ZRA_ZONE_REPORT) { req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zra); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } switch (req->cmd->zmr.pr) { @@ -177,7 +177,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) break; default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, pr); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | 
NVME_STATUS_DNR; } switch (req->cmd->zmr.zrasf) { @@ -193,7 +193,7 @@ static u16 nvmet_bdev_validate_zone_mgmt_recv(struct nvmet_req *req) default: req->error_loc = offsetof(struct nvme_zone_mgmt_recv_cmd, zrasf); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -341,7 +341,7 @@ static u16 blkdev_zone_mgmt_errno_to_nvme_status(int ret) return NVME_SC_SUCCESS; case -EINVAL: case -EIO: - return NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + return NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; default: return NVME_SC_INTERNAL; } @@ -463,7 +463,7 @@ static u16 nvmet_bdev_execute_zmgmt_send_all(struct nvmet_req *req) default: /* this is needed to quiet compiler warning */ req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - return NVME_SC_INVALID_FIELD | NVME_SC_DNR; + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; } return NVME_SC_SUCCESS; @@ -481,7 +481,7 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (op == REQ_OP_LAST) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, zsa); - status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_SC_DNR; + status = NVME_SC_ZONE_INVALID_TRANSITION | NVME_STATUS_DNR; goto out; } @@ -493,13 +493,13 @@ static void nvmet_bdev_zmgmt_send_work(struct work_struct *w) if (sect >= get_capacity(bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (zone_sectors - 1)) { req->error_loc = offsetof(struct nvme_zone_mgmt_send_cmd, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -537,13 +537,21 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) u16 status = NVME_SC_SUCCESS; unsigned int total_len = 0; struct scatterlist *sg; + u32 data_len = nvmet_rw_data_len(req); struct bio *bio; int sg_cnt; - /* Request is completed on len mismatch in nvmet_check_transter_len() */ + /* Request is completed on len mismatch in nvmet_check_transfer_len() */ if (!nvmet_check_transfer_len(req, nvmet_rw_data_len(req))) return; + if (data_len > + bdev_max_zone_append_sectors(req->ns->bdev) << SECTOR_SHIFT) { + req->error_loc = offsetof(struct nvme_rw_command, length); + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto out; + } + if (!req->sg_cnt) { nvmet_req_complete(req, 0); return; @@ -551,13 +559,13 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) if (sect >= get_capacity(req->ns->bdev->bd_disk)) { req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_LBA_RANGE | NVME_SC_DNR; + status = NVME_SC_LBA_RANGE | NVME_STATUS_DNR; goto out; } if (sect & (bdev_zone_sectors(req->ns->bdev) - 1)) { req->error_loc = offsetof(struct nvme_rw_command, slba); - status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; goto out; } @@ -576,21 +584,17 @@ void nvmet_bdev_execute_zone_append(struct nvmet_req *req) bio->bi_opf |= REQ_FUA; for_each_sg(req->sg, sg, req->sg_cnt, sg_cnt) { - struct page *p = sg_page(sg); - unsigned int l = sg->length; - unsigned int o = sg->offset; - unsigned int ret; + unsigned int len = sg->length; - ret = bio_add_zone_append_page(bio, p, l, o); - if (ret != sg->length) { + if (bio_add_page(bio, sg_page(sg), len, sg->offset) != len) { status = NVME_SC_INTERNAL; goto out_put_bio; } - total_len += sg->length; + total_len += len; } - if (total_len != 
nvmet_rw_data_len(req)) { - status = NVME_SC_INTERNAL | NVME_SC_DNR; + if (total_len != data_len) { + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; goto out_put_bio; } |
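The zone-append path above now rejects requests larger than the device's zone-append limit up front and feeds the bio with bio_add_page() instead of the removed bio_add_zone_append_page() helper. The checks reduce to a few block-layer queries; a condensed sketch under that assumption (the helper name is made up, the bdev_* accessors are the ones used in the diff):

#include <linux/blkdev.h>

/*
 * Validate a zone append of @len bytes starting at @pos_sect against @bdev:
 * inside the device, aligned to a zone start, and no larger than the
 * maximum data size a single zone-append command may carry.
 */
static bool zone_append_ok(struct block_device *bdev, sector_t pos_sect,
			   u32 len)
{
	if (pos_sect >= get_capacity(bdev->bd_disk))
		return false;				/* LBA out of range */
	if (pos_sect & (bdev_zone_sectors(bdev) - 1))
		return false;				/* must target a zone start */
	if (len > bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT)
		return false;				/* exceeds the device limit */
	return true;
}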
