author     Jens Axboe <axboe@kernel.dk>              2025-01-13 07:12:15 -0700
committer  Jens Axboe <axboe@kernel.dk>              2025-01-13 07:12:15 -0700
commit     9752b55035b161e40220b9ec8fae6e363a601996  (patch)
tree       6143154de8fd423044fa01d1ca6f2651876a6f24  /drivers/nvme
parent     afd69d5c4a1049230fa91c9b54fdd8132f755503  (diff)
parent     4a324970fabad503260973cd588609f3a26baab9  (diff)
Merge tag 'nvme-6.14-2025-01-12' of git://git.infradead.org/nvme into for-6.14/block
Pull NVMe updates from Keith:
"nvme updates for Linux 6.14
- Target support for PCI-Endpoint transport (Damien)
- TCP IO queue spreading fixes (Sagi, Chaitanya)
- Target handling for "limited retry" flags (Guixen)
- Poll type fix (Yongsoo)
- Xarray storage error handling (Keisuke)
- Host memory buffer free size fix on error (Francis)"
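
The "Xarray storage error handling" fix above makes nvme_get_effects_log() check the return value of xa_store() instead of ignoring it. As a minimal sketch of that pattern, assuming the same xarray and effects-log types used in the core.c hunk further down (the helper name here is illustrative, not the driver's):

#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/xarray.h>

/* Illustrative helper: store a command-effects log and handle xa_store() failure. */
static int store_effects_log(struct xarray *cels, u8 csi,
			     struct nvme_effects_log *cel)
{
	void *old;

	old = xa_store(cels, csi, cel, GFP_KERNEL);
	if (xa_is_err(old)) {
		/* xa_store() failed (e.g. -ENOMEM): free the log and propagate the error. */
		kfree(cel);
		return xa_err(old);
	}

	return 0;
}

On success xa_store() returns the previously stored entry, so the caller only needs to handle the xa_is_err() case, which is what the new error paths in nvme_get_effects_log() and nvme_init_effects_log() do.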
* tag 'nvme-6.14-2025-01-12' of git://git.infradead.org/nvme: (25 commits)
nvme-pci: use correct size to free the hmb buffer
nvme: Add error path for xa_store in nvme_init_effects
nvme-pci: fix comment typo
Documentation: Document the NVMe PCI endpoint target driver
nvmet: New NVMe PCI endpoint function target driver
nvmet: Implement arbitration feature support
nvmet: Implement interrupt config feature support
nvmet: Implement interrupt coalescing feature support
nvmet: Implement host identifier set feature support
nvmet: Introduce get/set_feature controller operations
nvmet: Do not require SGL for PCI target controller commands
nvmet: Add support for I/O queue management admin commands
nvmet: Introduce nvmet_sq_create() and nvmet_cq_create()
nvmet: Introduce nvmet_req_transfer_len()
nvmet: Improve nvmet_alloc_ctrl() interface and implementation
nvme: Add PCI transport type
nvmet: Add drvdata field to struct nvmet_ctrl
nvmet: Introduce nvmet_get_cmd_effects_admin()
nvmet: Export nvmet_update_cc() and nvmet_cc_xxx() helpers
nvmet: Add vendor_id and subsys_vendor_id subsystem attributes
...
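
Several of the nvmet commits listed above (arbitration, interrupt config, interrupt coalescing, and the get/set_feature controller operations) route feature handling through new per-transport callbacks rather than handling everything in the core. As a hedged sketch of how a transport might wire one of these up, assuming only the set_feature() prototype and struct nvmet_feat_irq_coalesce added to nvmet.h in the diff below (the function name and body are hypothetical, not the pci-epf.c implementation):

#include <linux/nvme.h>
#include "nvmet.h"

/* Hypothetical transport callback: decode the feature payload prepared by the core. */
static u16 my_transport_set_feature(const struct nvmet_ctrl *ctrl, u8 feat,
				    void *feat_data)
{
	struct nvmet_feat_irq_coalesce *irqc;

	switch (feat) {
	case NVME_FEAT_IRQ_COALESCE:
		irqc = feat_data;
		/* The core decoded cdw11 into aggregation time and threshold. */
		pr_debug("irq coalesce: time=%u thr=%u\n", irqc->time, irqc->thr);
		return NVME_SC_SUCCESS;
	default:
		return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
	}
}

The core invokes this through ctrl->ops->set_feature() from nvmet_set_feat_irq_coalesce(), so a transport only sees the already-decoded struct, never the raw command dwords.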
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c                |   34
-rw-r--r--  drivers/nvme/host/nvme.h                |   39
-rw-r--r--  drivers/nvme/host/pci.c                 |   14
-rw-r--r--  drivers/nvme/host/tcp.c                 |   70
-rw-r--r--  drivers/nvme/target/Kconfig             |   11
-rw-r--r--  drivers/nvme/target/Makefile            |    2
-rw-r--r--  drivers/nvme/target/admin-cmd.c         |  388
-rw-r--r--  drivers/nvme/target/configfs.c          |   49
-rw-r--r--  drivers/nvme/target/core.c              |  266
-rw-r--r--  drivers/nvme/target/discovery.c         |   17
-rw-r--r--  drivers/nvme/target/fabrics-cmd-auth.c  |   14
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c       |  101
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c       |    3
-rw-r--r--  drivers/nvme/target/nvmet.h             |  110
-rw-r--r--  drivers/nvme/target/pci-epf.c           | 2591
15 files changed, 3523 insertions, 186 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 1ccf17f6ea7f..9e7f1bb81973 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3093,7 +3093,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, struct nvme_effects_log **log) { - struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); + struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi); int ret; if (cel) @@ -3110,7 +3110,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, return ret; } - xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); + old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); + if (xa_is_err(old)) { + kfree(cel); + return xa_err(old); + } out: *log = cel; return 0; @@ -3172,6 +3176,25 @@ free_data: return ret; } +static int nvme_init_effects_log(struct nvme_ctrl *ctrl, + u8 csi, struct nvme_effects_log **log) +{ + struct nvme_effects_log *effects, *old; + + effects = kzalloc(sizeof(*effects), GFP_KERNEL); + if (effects) + return -ENOMEM; + + old = xa_store(&ctrl->cels, csi, effects, GFP_KERNEL); + if (xa_is_err(old)) { + kfree(effects); + return xa_err(old); + } + + *log = effects; + return 0; +} + static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl) { struct nvme_effects_log *log = ctrl->effects; @@ -3218,10 +3241,9 @@ static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) } if (!ctrl->effects) { - ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL); - if (!ctrl->effects) - return -ENOMEM; - xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL); + ret = nvme_init_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects); + if (ret < 0) + return ret; } nvme_init_known_nvm_effects(ctrl); diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 611b02c8a8b3..2c76afd00390 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -1182,43 +1182,4 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl) return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI; } -#ifdef CONFIG_NVME_VERBOSE_ERRORS -const char *nvme_get_error_status_str(u16 status); -const char *nvme_get_opcode_str(u8 opcode); -const char *nvme_get_admin_opcode_str(u8 opcode); -const char *nvme_get_fabrics_opcode_str(u8 opcode); -#else /* CONFIG_NVME_VERBOSE_ERRORS */ -static inline const char *nvme_get_error_status_str(u16 status) -{ - return "I/O Error"; -} -static inline const char *nvme_get_opcode_str(u8 opcode) -{ - return "I/O Cmd"; -} -static inline const char *nvme_get_admin_opcode_str(u8 opcode) -{ - return "Admin Cmd"; -} - -static inline const char *nvme_get_fabrics_opcode_str(u8 opcode) -{ - return "Fabrics Cmd"; -} -#endif /* CONFIG_NVME_VERBOSE_ERRORS */ - -static inline const char *nvme_opcode_str(int qid, u8 opcode) -{ - return qid ? nvme_get_opcode_str(opcode) : - nvme_get_admin_opcode_str(opcode); -} - -static inline const char *nvme_fabrics_opcode_str( - int qid, const struct nvme_command *cmd) -{ - if (nvme_is_fabrics(cmd)) - return nvme_get_fabrics_opcode_str(cmd->fabrics.fctype); - - return nvme_opcode_str(qid, cmd->common.opcode); -} #endif /* _NVME_H */ diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 709328a67f91..fe0795e16e25 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -372,7 +372,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, /* * Ensure that the doorbell is updated before reading the event * index from memory. 
The controller needs to provide similar - * ordering to ensure the envent index is updated before reading + * ordering to ensure the event index is updated before reading * the doorbell. */ mb(); @@ -1147,13 +1147,13 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) } } -static inline int nvme_poll_cq(struct nvme_queue *nvmeq, - struct io_comp_batch *iob) +static inline bool nvme_poll_cq(struct nvme_queue *nvmeq, + struct io_comp_batch *iob) { - int found = 0; + bool found = false; while (nvme_cqe_pending(nvmeq)) { - found++; + found = true; /* * load-load control dependency between phase and the rest of * the cqe requires a full read memory barrier @@ -2085,8 +2085,8 @@ static int nvme_alloc_host_mem_single(struct nvme_dev *dev, u64 size) sizeof(*dev->host_mem_descs), &dev->host_mem_descs_dma, GFP_KERNEL); if (!dev->host_mem_descs) { - dma_free_noncontiguous(dev->dev, dev->host_mem_size, - dev->hmb_sgt, DMA_BIDIRECTIONAL); + dma_free_noncontiguous(dev->dev, size, dev->hmb_sgt, + DMA_BIDIRECTIONAL); dev->hmb_sgt = NULL; return -ENOMEM; } diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 28c76a3e1bd2..dc5bbca58c6d 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -54,6 +54,8 @@ MODULE_PARM_DESC(tls_handshake_timeout, "nvme TLS handshake timeout in seconds (default 10)"); #endif +static atomic_t nvme_tcp_cpu_queues[NR_CPUS]; + #ifdef CONFIG_DEBUG_LOCK_ALLOC /* lockdep can detect a circular dependency of the form * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock @@ -127,6 +129,7 @@ enum nvme_tcp_queue_flags { NVME_TCP_Q_ALLOCATED = 0, NVME_TCP_Q_LIVE = 1, NVME_TCP_Q_POLLING = 2, + NVME_TCP_Q_IO_CPU_SET = 3, }; enum nvme_tcp_recv_state { @@ -1562,23 +1565,56 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue) ctrl->io_queues[HCTX_TYPE_POLL]; } +/** + * Track the number of queues assigned to each cpu using a global per-cpu + * counter and select the least used cpu from the mq_map. Our goal is to spread + * different controllers I/O threads across different cpu cores. + * + * Note that the accounting is not 100% perfect, but we don't need to be, we're + * simply putting our best effort to select the best candidate cpu core that we + * find at any given point. 
+ */ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue) { struct nvme_tcp_ctrl *ctrl = queue->ctrl; - int qid = nvme_tcp_queue_id(queue); - int n = 0; + struct blk_mq_tag_set *set = &ctrl->tag_set; + int qid = nvme_tcp_queue_id(queue) - 1; + unsigned int *mq_map = NULL; + int cpu, min_queues = INT_MAX, io_cpu; + + if (wq_unbound) + goto out; if (nvme_tcp_default_queue(queue)) - n = qid - 1; + mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map; else if (nvme_tcp_read_queue(queue)) - n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; + mq_map = set->map[HCTX_TYPE_READ].mq_map; else if (nvme_tcp_poll_queue(queue)) - n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - - ctrl->io_queues[HCTX_TYPE_READ] - 1; - if (wq_unbound) - queue->io_cpu = WORK_CPU_UNBOUND; - else - queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); + mq_map = set->map[HCTX_TYPE_POLL].mq_map; + + if (WARN_ON(!mq_map)) + goto out; + + /* Search for the least used cpu from the mq_map */ + io_cpu = WORK_CPU_UNBOUND; + for_each_online_cpu(cpu) { + int num_queues = atomic_read(&nvme_tcp_cpu_queues[cpu]); + + if (mq_map[cpu] != qid) + continue; + if (num_queues < min_queues) { + io_cpu = cpu; + min_queues = num_queues; + } + } + if (io_cpu != WORK_CPU_UNBOUND) { + queue->io_cpu = io_cpu; + atomic_inc(&nvme_tcp_cpu_queues[io_cpu]); + set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags); + } +out: + dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n", + qid, queue->io_cpu); } static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid) @@ -1722,7 +1758,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid, queue->sock->sk->sk_allocation = GFP_ATOMIC; queue->sock->sk->sk_use_task_frag = false; - nvme_tcp_set_queue_io_cpu(queue); + queue->io_cpu = WORK_CPU_UNBOUND; queue->request = NULL; queue->data_remaining = 0; queue->ddgst_remaining = 0; @@ -1844,6 +1880,9 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; + if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags)) + atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]); + mutex_lock(&queue->queue_lock); if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) __nvme_tcp_stop_queue(queue); @@ -1878,9 +1917,10 @@ static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) nvme_tcp_init_recv_ctx(queue); nvme_tcp_setup_sock_ops(queue); - if (idx) + if (idx) { + nvme_tcp_set_queue_io_cpu(queue); ret = nvmf_connect_io_queue(nctrl, idx); - else + } else ret = nvmf_connect_admin_queue(nctrl); if (!ret) { @@ -2849,6 +2889,7 @@ static struct nvmf_transport_ops nvme_tcp_transport = { static int __init nvme_tcp_init_module(void) { unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS; + int cpu; BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8); BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72); @@ -2866,6 +2907,9 @@ static int __init nvme_tcp_init_module(void) if (!nvme_tcp_wq) return -ENOMEM; + for_each_possible_cpu(cpu) + atomic_set(&nvme_tcp_cpu_queues[cpu], 0); + nvmf_register_transport(&nvme_tcp_transport); return 0; } diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig index 46be031f91b4..fb7446d6d682 100644 --- a/drivers/nvme/target/Kconfig +++ b/drivers/nvme/target/Kconfig @@ -115,3 +115,14 @@ config NVME_TARGET_AUTH target side. If unsure, say N. 
+ +config NVME_TARGET_PCI_EPF + tristate "NVMe PCI Endpoint Function target support" + depends on NVME_TARGET && PCI_ENDPOINT + depends on NVME_CORE=y || NVME_CORE=NVME_TARGET + help + This enables the NVMe PCI Endpoint Function target driver support, + which allows creating a NVMe PCI controller using an endpoint mode + capable PCI controller. + + If unsure, say N. diff --git a/drivers/nvme/target/Makefile b/drivers/nvme/target/Makefile index f2b025bbe10c..ed8522911d1f 100644 --- a/drivers/nvme/target/Makefile +++ b/drivers/nvme/target/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o +obj-$(CONFIG_NVME_TARGET_PCI_EPF) += nvmet-pci-epf.o nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \ discovery.o io-cmd-file.o io-cmd-bdev.o pr.o @@ -20,4 +21,5 @@ nvmet-rdma-y += rdma.o nvmet-fc-y += fc.o nvme-fcloop-y += fcloop.o nvmet-tcp-y += tcp.o +nvmet-pci-epf-y += pci-epf.o nvmet-$(CONFIG_TRACING) += trace.o diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 2962794ce881..3ddd8e44e148 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -12,6 +12,142 @@ #include <linux/unaligned.h> #include "nvmet.h" +static void nvmet_execute_delete_sq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + if (!sqid) { + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + status = nvmet_check_sqid(ctrl, sqid, false); + if (status != NVME_SC_SUCCESS) + goto complete; + + status = ctrl->ops->delete_sq(ctrl, sqid); + +complete: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_create_sq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvme_command *cmd = req->cmd; + u16 sqid = le16_to_cpu(cmd->create_sq.sqid); + u16 cqid = le16_to_cpu(cmd->create_sq.cqid); + u16 sq_flags = le16_to_cpu(cmd->create_sq.sq_flags); + u16 qsize = le16_to_cpu(cmd->create_sq.qsize); + u64 prp1 = le64_to_cpu(cmd->create_sq.prp1); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + if (!sqid) { + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + status = nvmet_check_sqid(ctrl, sqid, true); + if (status != NVME_SC_SUCCESS) + goto complete; + + /* + * Note: The NVMe specification allows multiple SQs to use the same CQ. + * However, the target code does not really support that. So for now, + * prevent this and fail the command if sqid and cqid are different. 
+ */ + if (!cqid || cqid != sqid) { + pr_err("SQ %u: Unsupported CQID %u\n", sqid, cqid); + status = NVME_SC_CQ_INVALID | NVME_STATUS_DNR; + goto complete; + } + + if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) { + status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR; + goto complete; + } + + status = ctrl->ops->create_sq(ctrl, sqid, sq_flags, qsize, prp1); + +complete: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_delete_cq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u16 cqid = le16_to_cpu(req->cmd->delete_queue.qid); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + if (!cqid) { + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + status = nvmet_check_cqid(ctrl, cqid); + if (status != NVME_SC_SUCCESS) + goto complete; + + status = ctrl->ops->delete_cq(ctrl, cqid); + +complete: + nvmet_req_complete(req, status); +} + +static void nvmet_execute_create_cq(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvme_command *cmd = req->cmd; + u16 cqid = le16_to_cpu(cmd->create_cq.cqid); + u16 cq_flags = le16_to_cpu(cmd->create_cq.cq_flags); + u16 qsize = le16_to_cpu(cmd->create_cq.qsize); + u16 irq_vector = le16_to_cpu(cmd->create_cq.irq_vector); + u64 prp1 = le64_to_cpu(cmd->create_cq.prp1); + u16 status; + + if (!nvmet_is_pci_ctrl(ctrl)) { + status = nvmet_report_invalid_opcode(req); + goto complete; + } + + if (!cqid) { + status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + status = nvmet_check_cqid(ctrl, cqid); + if (status != NVME_SC_SUCCESS) + goto complete; + + if (!qsize || qsize > NVME_CAP_MQES(ctrl->cap)) { + status = NVME_SC_QUEUE_SIZE | NVME_STATUS_DNR; + goto complete; + } + + status = ctrl->ops->create_cq(ctrl, cqid, cq_flags, qsize, + prp1, irq_vector); + +complete: + nvmet_req_complete(req, status); +} + u32 nvmet_get_log_page_len(struct nvme_command *cmd) { u32 len = le16_to_cpu(cmd->get_log_page.numdu); @@ -230,8 +366,18 @@ out: nvmet_req_complete(req, status); } -static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) +static void nvmet_get_cmd_effects_admin(struct nvmet_ctrl *ctrl, + struct nvme_effects_log *log) { + /* For a PCI target controller, advertize support for the . 
*/ + if (nvmet_is_pci_ctrl(ctrl)) { + log->acs[nvme_admin_delete_sq] = + log->acs[nvme_admin_create_sq] = + log->acs[nvme_admin_delete_cq] = + log->acs[nvme_admin_create_cq] = + cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); + } + log->acs[nvme_admin_get_log_page] = log->acs[nvme_admin_identify] = log->acs[nvme_admin_abort_cmd] = @@ -240,7 +386,10 @@ static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) log->acs[nvme_admin_async_event] = log->acs[nvme_admin_keep_alive] = cpu_to_le32(NVME_CMD_EFFECTS_CSUPP); +} +static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log) +{ log->iocs[nvme_cmd_read] = log->iocs[nvme_cmd_flush] = log->iocs[nvme_cmd_dsm] = @@ -265,6 +414,7 @@ static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log) static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) { + struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_effects_log *log; u16 status = NVME_SC_SUCCESS; @@ -276,6 +426,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) switch (req->cmd->get_log_page.csi) { case NVME_CSI_NVM: + nvmet_get_cmd_effects_admin(ctrl, log); nvmet_get_cmd_effects_nvm(log); break; case NVME_CSI_ZNS: @@ -283,6 +434,7 @@ static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) status = NVME_SC_INVALID_IO_CMD_SET; goto free; } + nvmet_get_cmd_effects_admin(ctrl, log); nvmet_get_cmd_effects_nvm(log); nvmet_get_cmd_effects_zns(log); break; @@ -507,7 +659,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvmet_subsys *subsys = ctrl->subsys; struct nvme_id_ctrl *id; - u32 cmd_capsule_size; + u32 cmd_capsule_size, ctratt; u16 status = 0; if (!subsys->subsys_discovered) { @@ -522,9 +674,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) goto out; } - /* XXX: figure out how to assign real vendors IDs. */ - id->vid = 0; - id->ssvid = 0; + id->vid = cpu_to_le16(subsys->vendor_id); + id->ssvid = cpu_to_le16(subsys->subsys_vendor_id); memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE); memcpy_and_pad(id->mn, sizeof(id->mn), subsys->model_number, @@ -556,8 +707,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) /* XXX: figure out what to do about RTD3R/RTD3 */ id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL); - id->ctratt = cpu_to_le32(NVME_CTRL_ATTR_HID_128_BIT | - NVME_CTRL_ATTR_TBKAS); + ctratt = NVME_CTRL_ATTR_HID_128_BIT | NVME_CTRL_ATTR_TBKAS; + if (nvmet_is_pci_ctrl(ctrl)) + ctratt |= NVME_CTRL_ATTR_RHII; + id->ctratt = cpu_to_le32(ctratt); id->oacs = 0; @@ -1104,6 +1257,92 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask) return 0; } +static u16 nvmet_set_feat_host_id(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + + if (!nvmet_is_pci_ctrl(ctrl)) + return NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; + + /* + * The NVMe base specifications v2.1 recommends supporting 128-bits host + * IDs (section 5.1.25.1.28.1). However, that same section also says + * that "The controller may support a 64-bit Host Identifier and/or an + * extended 128-bit Host Identifier". So simplify this support and do + * not support 64-bits host IDs to avoid needing to check that all + * controllers associated with the same subsystem all use the same host + * ID size. 
+ */ + if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) { + req->error_loc = offsetof(struct nvme_common_command, cdw11); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return nvmet_copy_from_sgl(req, 0, &req->sq->ctrl->hostid, + sizeof(req->sq->ctrl->hostid)); +} + +static u16 nvmet_set_feat_irq_coalesce(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + struct nvmet_feat_irq_coalesce irqc = { + .time = (cdw11 >> 8) & 0xff, + .thr = cdw11 & 0xff, + }; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. + */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc); +} + +static u16 nvmet_set_feat_irq_config(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + struct nvmet_feat_irq_config irqcfg = { + .iv = cdw11 & 0xffff, + .cd = (cdw11 >> 16) & 0x1, + }; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. + */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return ctrl->ops->set_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg); +} + +static u16 nvmet_set_feat_arbitration(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11); + struct nvmet_feat_arbitration arb = { + .hpw = (cdw11 >> 24) & 0xff, + .mpw = (cdw11 >> 16) & 0xff, + .lpw = (cdw11 >> 8) & 0xff, + .ab = cdw11 & 0x3, + }; + + if (!ctrl->ops->set_feature) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + return ctrl->ops->set_feature(ctrl, NVME_FEAT_ARBITRATION, &arb); +} + void nvmet_execute_set_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = nvmet_req_subsys(req); @@ -1117,6 +1356,9 @@ void nvmet_execute_set_features(struct nvmet_req *req) return; switch (cdw10 & 0xff) { + case NVME_FEAT_ARBITRATION: + status = nvmet_set_feat_arbitration(req); + break; case NVME_FEAT_NUM_QUEUES: ncqr = (cdw11 >> 16) & 0xffff; nsqr = cdw11 & 0xffff; @@ -1127,6 +1369,12 @@ void nvmet_execute_set_features(struct nvmet_req *req) nvmet_set_result(req, (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16)); break; + case NVME_FEAT_IRQ_COALESCE: + status = nvmet_set_feat_irq_coalesce(req); + break; + case NVME_FEAT_IRQ_CONFIG: + status = nvmet_set_feat_irq_config(req); + break; case NVME_FEAT_KATO: status = nvmet_set_feat_kato(req); break; @@ -1134,7 +1382,7 @@ void nvmet_execute_set_features(struct nvmet_req *req) status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL); break; case NVME_FEAT_HOST_ID: - status = NVME_SC_CMD_SEQ_ERROR | NVME_STATUS_DNR; + status = nvmet_set_feat_host_id(req); break; case NVME_FEAT_WRITE_PROTECT: status = nvmet_set_feat_write_protect(req); @@ -1171,6 +1419,79 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req) return 0; } +static u16 nvmet_get_feat_irq_coalesce(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_feat_irq_coalesce irqc = { }; + u16 status; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. 
+ */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_COALESCE, &irqc); + if (status != NVME_SC_SUCCESS) + return status; + + nvmet_set_result(req, ((u32)irqc.time << 8) | (u32)irqc.thr); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_get_feat_irq_config(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + u32 iv = le32_to_cpu(req->cmd->common.cdw11) & 0xffff; + struct nvmet_feat_irq_config irqcfg = { .iv = iv }; + u16 status; + + /* + * This feature is not supported for fabrics controllers and mandatory + * for PCI controllers. + */ + if (!nvmet_is_pci_ctrl(ctrl)) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + status = ctrl->ops->get_feature(ctrl, NVME_FEAT_IRQ_CONFIG, &irqcfg); + if (status != NVME_SC_SUCCESS) + return status; + + nvmet_set_result(req, ((u32)irqcfg.cd << 16) | iv); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_get_feat_arbitration(struct nvmet_req *req) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_feat_arbitration arb = { }; + u16 status; + + if (!ctrl->ops->get_feature) { + req->error_loc = offsetof(struct nvme_common_command, cdw10); + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + + status = ctrl->ops->get_feature(ctrl, NVME_FEAT_ARBITRATION, &arb); + if (status != NVME_SC_SUCCESS) + return status; + + nvmet_set_result(req, + ((u32)arb.hpw << 24) | + ((u32)arb.mpw << 16) | + ((u32)arb.lpw << 8) | + (arb.ab & 0x3)); + + return NVME_SC_SUCCESS; +} + void nvmet_get_feat_kato(struct nvmet_req *req) { nvmet_set_result(req, req->sq->ctrl->kato * 1000); @@ -1197,21 +1518,24 @@ void nvmet_execute_get_features(struct nvmet_req *req) * need to come up with some fake values for these. */ #if 0 - case NVME_FEAT_ARBITRATION: - break; case NVME_FEAT_POWER_MGMT: break; case NVME_FEAT_TEMP_THRESH: break; case NVME_FEAT_ERR_RECOVERY: break; + case NVME_FEAT_WRITE_ATOMIC: + break; +#endif + case NVME_FEAT_ARBITRATION: + status = nvmet_get_feat_arbitration(req); + break; case NVME_FEAT_IRQ_COALESCE: + status = nvmet_get_feat_irq_coalesce(req); break; case NVME_FEAT_IRQ_CONFIG: + status = nvmet_get_feat_irq_config(req); break; - case NVME_FEAT_WRITE_ATOMIC: - break; -#endif case NVME_FEAT_ASYNC_EVENT: nvmet_get_feat_async_event(req); break; @@ -1292,6 +1616,27 @@ out: nvmet_req_complete(req, status); } +u32 nvmet_admin_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + if (nvme_is_fabrics(cmd)) + return nvmet_fabrics_admin_cmd_data_len(req); + if (nvmet_is_disc_subsys(nvmet_req_subsys(req))) + return nvmet_discovery_cmd_data_len(req); + + switch (cmd->common.opcode) { + case nvme_admin_get_log_page: + return nvmet_get_log_page_len(cmd); + case nvme_admin_identify: + return NVME_IDENTIFY_DATA_SIZE; + case nvme_admin_get_features: + return nvmet_feat_data_len(req, le32_to_cpu(cmd->common.cdw10)); + default: + return 0; + } +} + u16 nvmet_parse_admin_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -1306,13 +1651,30 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req) if (unlikely(ret)) return ret; + /* For PCI controllers, admin commands shall not use SGL. 
*/ + if (nvmet_is_pci_ctrl(req->sq->ctrl) && !req->sq->qid && + cmd->common.flags & NVME_CMD_SGL_ALL) + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + if (nvmet_is_passthru_req(req)) return nvmet_parse_passthru_admin_cmd(req); switch (cmd->common.opcode) { + case nvme_admin_delete_sq: + req->execute = nvmet_execute_delete_sq; + return 0; + case nvme_admin_create_sq: + req->execute = nvmet_execute_create_sq; + return 0; case nvme_admin_get_log_page: req->execute = nvmet_execute_get_log_page; return 0; + case nvme_admin_delete_cq: + req->execute = nvmet_execute_delete_cq; + return 0; + case nvme_admin_create_cq: + req->execute = nvmet_execute_create_cq; + return 0; case nvme_admin_identify: req->execute = nvmet_execute_identify; return 0; diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index eeee9e9b854c..20cad722c060 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -37,6 +37,7 @@ static struct nvmet_type_name_map nvmet_transport[] = { { NVMF_TRTYPE_RDMA, "rdma" }, { NVMF_TRTYPE_FC, "fc" }, { NVMF_TRTYPE_TCP, "tcp" }, + { NVMF_TRTYPE_PCI, "pci" }, { NVMF_TRTYPE_LOOP, "loop" }, }; @@ -46,6 +47,7 @@ static const struct nvmet_type_name_map nvmet_addr_family[] = { { NVMF_ADDR_FAMILY_IP6, "ipv6" }, { NVMF_ADDR_FAMILY_IB, "ib" }, { NVMF_ADDR_FAMILY_FC, "fc" }, + { NVMF_ADDR_FAMILY_PCI, "pci" }, { NVMF_ADDR_FAMILY_LOOP, "loop" }, }; @@ -1412,6 +1414,49 @@ out_unlock: } CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max); +static ssize_t nvmet_subsys_attr_vendor_id_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "0x%x\n", to_subsys(item)->vendor_id); +} + +static ssize_t nvmet_subsys_attr_vendor_id_store(struct config_item *item, + const char *page, size_t count) +{ + u16 vid; + + if (kstrtou16(page, 0, &vid)) + return -EINVAL; + + down_write(&nvmet_config_sem); + to_subsys(item)->vendor_id = vid; + up_write(&nvmet_config_sem); + return count; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_vendor_id); + +static ssize_t nvmet_subsys_attr_subsys_vendor_id_show(struct config_item *item, + char *page) +{ + return snprintf(page, PAGE_SIZE, "0x%x\n", + to_subsys(item)->subsys_vendor_id); +} + +static ssize_t nvmet_subsys_attr_subsys_vendor_id_store(struct config_item *item, + const char *page, size_t count) +{ + u16 ssvid; + + if (kstrtou16(page, 0, &ssvid)) + return -EINVAL; + + down_write(&nvmet_config_sem); + to_subsys(item)->subsys_vendor_id = ssvid; + up_write(&nvmet_config_sem); + return count; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_subsys_vendor_id); + static ssize_t nvmet_subsys_attr_model_show(struct config_item *item, char *page) { @@ -1640,6 +1685,8 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = { &nvmet_subsys_attr_attr_serial, &nvmet_subsys_attr_attr_cntlid_min, &nvmet_subsys_attr_attr_cntlid_max, + &nvmet_subsys_attr_attr_vendor_id, + &nvmet_subsys_attr_attr_subsys_vendor_id, &nvmet_subsys_attr_attr_model, &nvmet_subsys_attr_attr_qid_max, &nvmet_subsys_attr_attr_ieee_oui, @@ -1794,6 +1841,7 @@ static struct config_group *nvmet_referral_make( return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&port->entry); + port->disc_addr.trtype = NVMF_TRTYPE_MAX; config_group_init_type_name(&port->group, name, &nvmet_referral_type); return &port->group; @@ -2019,6 +2067,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group, port->inline_data_size = -1; /* < 0 == let the transport choose */ port->max_queue_size = -1; /* < 0 == let the transport choose */ + port->disc_addr.trtype = NVMF_TRTYPE_MAX; 
port->disc_addr.portid = cpu_to_le16(portid); port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX; port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 1f4e9989663b..43c9888eea90 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -818,6 +818,89 @@ static void nvmet_confirm_sq(struct percpu_ref *ref) complete(&sq->confirm_done); } +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid) +{ + if (!ctrl->sqs) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + + if (cqid > ctrl->subsys->max_qid) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + /* + * Note: For PCI controllers, the NVMe specifications allows multiple + * SQs to share a single CQ. However, we do not support this yet, so + * check that there is no SQ defined for a CQ. If one exist, then the + * CQ ID is invalid for creation as well as when the CQ is being + * deleted (as that would mean that the SQ was not deleted before the + * CQ). + */ + if (ctrl->sqs[cqid]) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + return NVME_SC_SUCCESS; +} + +u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, + u16 qid, u16 size) +{ + u16 status; + + status = nvmet_check_cqid(ctrl, qid); + if (status != NVME_SC_SUCCESS) + return status; + + nvmet_cq_setup(ctrl, cq, qid, size); + + return NVME_SC_SUCCESS; +} +EXPORT_SYMBOL_GPL(nvmet_cq_create); + +u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, + bool create) +{ + if (!ctrl->sqs) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + + if (sqid > ctrl->subsys->max_qid) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + if ((create && ctrl->sqs[sqid]) || + (!create && !ctrl->sqs[sqid])) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + return NVME_SC_SUCCESS; +} + +u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, + u16 sqid, u16 size) +{ + u16 status; + int ret; + + if (!kref_get_unless_zero(&ctrl->ref)) + return NVME_SC_INTERNAL | NVME_STATUS_DNR; + + status = nvmet_check_sqid(ctrl, sqid, true); + if (status != NVME_SC_SUCCESS) + return status; + + ret = nvmet_sq_init(sq); + if (ret) { + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + goto ctrl_put; + } + + nvmet_sq_setup(ctrl, sq, sqid, size); + sq->ctrl = ctrl; + + return NVME_SC_SUCCESS; + +ctrl_put: + nvmet_ctrl_put(ctrl); + return status; +} +EXPORT_SYMBOL_GPL(nvmet_sq_create); + void nvmet_sq_destroy(struct nvmet_sq *sq) { struct nvmet_ctrl *ctrl = sq->ctrl; @@ -911,6 +994,33 @@ static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req) return 0; } +static u32 nvmet_io_cmd_transfer_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + u32 metadata_len = 0; + + if (nvme_is_fabrics(cmd)) + return nvmet_fabrics_io_cmd_data_len(req); + + if (!req->ns) + return 0; + + switch (req->cmd->common.opcode) { + case nvme_cmd_read: + case nvme_cmd_write: + case nvme_cmd_zone_append: + if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) + metadata_len = nvmet_rw_metadata_len(req); + return nvmet_rw_data_len(req) + metadata_len; + case nvme_cmd_dsm: + return nvmet_dsm_len(req); + case nvme_cmd_zone_mgmt_recv: + return (le32_to_cpu(req->cmd->zmr.numd) + 1) << 2; + default: + return 0; + } +} + static u16 nvmet_parse_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -1012,12 +1122,15 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, /* * For fabrics, PSDT field shall describe metadata pointer (MPTR) that * contains an address of a single contiguous physical buffer that 
is - * byte aligned. + * byte aligned. For PCI controllers, this is optional so not enforced. */ if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) { - req->error_loc = offsetof(struct nvme_common_command, flags); - status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; - goto fail; + if (!req->sq->ctrl || !nvmet_is_pci_ctrl(req->sq->ctrl)) { + req->error_loc = + offsetof(struct nvme_common_command, flags); + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto fail; + } } if (unlikely(!req->sq->ctrl)) @@ -1059,11 +1172,27 @@ void nvmet_req_uninit(struct nvmet_req *req) } EXPORT_SYMBOL_GPL(nvmet_req_uninit); +size_t nvmet_req_transfer_len(struct nvmet_req *req) +{ + if (likely(req->sq->qid != 0)) + return nvmet_io_cmd_transfer_len(req); + if (unlikely(!req->sq->ctrl)) + return nvmet_connect_cmd_data_len(req); + return nvmet_admin_cmd_data_len(req); +} +EXPORT_SYMBOL_GPL(nvmet_req_transfer_len); + bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len) { if (unlikely(len != req->transfer_len)) { + u16 status; + req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); + if (req->cmd->common.flags & NVME_CMD_SGL_ALL) + status = NVME_SC_SGL_INVALID_DATA; + else + status = NVME_SC_INVALID_FIELD; + nvmet_req_complete(req, status | NVME_STATUS_DNR); return false; } @@ -1074,8 +1203,14 @@ EXPORT_SYMBOL_GPL(nvmet_check_transfer_len); bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len) { if (unlikely(data_len > req->transfer_len)) { + u16 status; + req->error_loc = offsetof(struct nvme_common_command, dptr); - nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_STATUS_DNR); + if (req->cmd->common.flags & NVME_CMD_SGL_ALL) + status = NVME_SC_SGL_INVALID_DATA; + else + status = NVME_SC_INVALID_FIELD; + nvmet_req_complete(req, status | NVME_STATUS_DNR); return false; } @@ -1166,41 +1301,6 @@ void nvmet_req_free_sgls(struct nvmet_req *req) } EXPORT_SYMBOL_GPL(nvmet_req_free_sgls); -static inline bool nvmet_cc_en(u32 cc) -{ - return (cc >> NVME_CC_EN_SHIFT) & 0x1; -} - -static inline u8 nvmet_cc_css(u32 cc) -{ - return (cc >> NVME_CC_CSS_SHIFT) & 0x7; -} - -static inline u8 nvmet_cc_mps(u32 cc) -{ - return (cc >> NVME_CC_MPS_SHIFT) & 0xf; -} - -static inline u8 nvmet_cc_ams(u32 cc) -{ - return (cc >> NVME_CC_AMS_SHIFT) & 0x7; -} - -static inline u8 nvmet_cc_shn(u32 cc) -{ - return (cc >> NVME_CC_SHN_SHIFT) & 0x3; -} - -static inline u8 nvmet_cc_iosqes(u32 cc) -{ - return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf; -} - -static inline u8 nvmet_cc_iocqes(u32 cc) -{ - return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; -} - static inline bool nvmet_css_supported(u8 cc_css) { switch (cc_css << NVME_CC_CSS_SHIFT) { @@ -1277,6 +1377,7 @@ void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; mutex_unlock(&ctrl->lock); } +EXPORT_SYMBOL_GPL(nvmet_update_cc); static void nvmet_init_cap(struct nvmet_ctrl *ctrl) { @@ -1384,15 +1485,15 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn) * Note: ctrl->subsys->lock should be held when calling this function */ static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl, - struct nvmet_req *req) + struct device *p2p_client) { struct nvmet_ns *ns; unsigned long idx; - if (!req->p2p_client) + if (!p2p_client) return; - ctrl->p2p_client = get_device(req->p2p_client); + ctrl->p2p_client = get_device(p2p_client); xa_for_each(&ctrl->subsys->namespaces, idx, ns) nvmet_p2pmem_ns_add_p2p(ctrl, ns); @@ -1421,45 
+1522,44 @@ static void nvmet_fatal_error_handler(struct work_struct *work) ctrl->ops->delete_ctrl(ctrl); } -u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, - struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp, - uuid_t *hostid) +struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args) { struct nvmet_subsys *subsys; struct nvmet_ctrl *ctrl; + u32 kato = args->kato; + u8 dhchap_status; int ret; - u16 status; - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; - subsys = nvmet_find_get_subsys(req->port, subsysnqn); + args->status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; + subsys = nvmet_find_get_subsys(args->port, args->subsysnqn); if (!subsys) { pr_warn("connect request for invalid subsystem %s!\n", - subsysnqn); - req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn); - req->error_loc = offsetof(struct nvme_common_command, dptr); - goto out; + args->subsysnqn); + args->result = IPO_IATTR_CONNECT_DATA(subsysnqn); + args->error_loc = offsetof(struct nvme_common_command, dptr); + return NULL; } down_read(&nvmet_config_sem); - if (!nvmet_host_allowed(subsys, hostnqn)) { + if (!nvmet_host_allowed(subsys, args->hostnqn)) { pr_info("connect by host %s for subsystem %s not allowed\n", - hostnqn, subsysnqn); - req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn); + args->hostnqn, args->subsysnqn); + args->result = IPO_IATTR_CONNECT_DATA(hostnqn); up_read(&nvmet_config_sem); - status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; - req->error_loc = offsetof(struct nvme_common_command, dptr); + args->status = NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; + args->error_loc = offsetof(struct nvme_common_command, dptr); goto out_put_subsystem; } up_read(&nvmet_config_sem); - status = NVME_SC_INTERNAL; + args->status = NVME_SC_INTERNAL; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) goto out_put_subsystem; mutex_init(&ctrl->lock); - ctrl->port = req->port; - ctrl->ops = req->ops; + ctrl->port = args->port; + ctrl->ops = args->ops; #ifdef CONFIG_NVME_TARGET_PASSTHRU /* By default, set loop targets to clear IDS by default */ @@ -1473,8 +1573,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); - memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); - memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); + memcpy(ctrl->subsysnqn, args->subsysnqn, NVMF_NQN_SIZE); + memcpy(ctrl->hostnqn, args->hostnqn, NVMF_NQN_SIZE); kref_init(&ctrl->ref); ctrl->subsys = subsys; @@ -1497,12 +1597,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, subsys->cntlid_min, subsys->cntlid_max, GFP_KERNEL); if (ret < 0) { - status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; + args->status = NVME_SC_CONNECT_CTRL_BUSY | NVME_STATUS_DNR; goto out_free_sqs; } ctrl->cntlid = ret; - uuid_copy(&ctrl->hostid, hostid); + uuid_copy(&ctrl->hostid, args->hostid); /* * Discovery controllers may use some arbitrary high value @@ -1524,12 +1624,35 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, if (ret) goto init_pr_fail; list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); - nvmet_setup_p2p_ns_map(ctrl, req); + nvmet_setup_p2p_ns_map(ctrl, args->p2p_client); nvmet_debugfs_ctrl_setup(ctrl); mutex_unlock(&subsys->lock); - *ctrlp = ctrl; - return 0; + if (args->hostid) + uuid_copy(&ctrl->hostid, args->hostid); + + dhchap_status = nvmet_setup_auth(ctrl); + if (dhchap_status) { + pr_err("Failed to setup 
authentication, dhchap status %u\n", + dhchap_status); + nvmet_ctrl_put(ctrl); + if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) + args->status = + NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR; + else + args->status = NVME_SC_INTERNAL; + return NULL; + } + + args->status = NVME_SC_SUCCESS; + + pr_info("Created %s controller %d for subsystem %s for NQN %s%s%s.\n", + nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", + ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, + ctrl->pi_support ? " T10-PI is enabled" : "", + nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : ""); + + return ctrl; init_pr_fail: mutex_unlock(&subsys->lock); @@ -1543,9 +1666,9 @@ out_free_ctrl: kfree(ctrl); out_put_subsystem: nvmet_subsys_put(subsys); -out: - return status; + return NULL; } +EXPORT_SYMBOL_GPL(nvmet_alloc_ctrl); static void nvmet_ctrl_free(struct kref *ref) { @@ -1581,6 +1704,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) { kref_put(&ctrl->ref, nvmet_ctrl_free); } +EXPORT_SYMBOL_GPL(nvmet_ctrl_put); void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) { diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 28843df5fa7c..df7207640506 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -224,6 +224,9 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req) } list_for_each_entry(r, &req->port->referrals, entry) { + if (r->disc_addr.trtype == NVMF_TRTYPE_PCI) + continue; + nvmet_format_discovery_entry(hdr, r, NVME_DISC_SUBSYS_NAME, r->disc_addr.traddr, @@ -352,6 +355,20 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req) nvmet_req_complete(req, stat); } +u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->common.opcode) { + case nvme_admin_get_log_page: + return nvmet_get_log_page_len(req->cmd); + case nvme_admin_identify: + return NVME_IDENTIFY_DATA_SIZE; + default: + return 0; + } +} + u16 nvmet_parse_discovery_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c index 3f2857c17d95..2022757f08dc 100644 --- a/drivers/nvme/target/fabrics-cmd-auth.c +++ b/drivers/nvme/target/fabrics-cmd-auth.c @@ -179,6 +179,11 @@ static u8 nvmet_auth_failure2(void *d) return data->rescode_exp; } +u32 nvmet_auth_send_data_len(struct nvmet_req *req) +{ + return le32_to_cpu(req->cmd->auth_send.tl); +} + void nvmet_execute_auth_send(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -206,7 +211,7 @@ void nvmet_execute_auth_send(struct nvmet_req *req) offsetof(struct nvmf_auth_send_command, spsp1); goto done; } - tl = le32_to_cpu(req->cmd->auth_send.tl); + tl = nvmet_auth_send_data_len(req); if (!tl) { status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = @@ -429,6 +434,11 @@ static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al) data->rescode_exp = req->sq->dhchap_status; } +u32 nvmet_auth_receive_data_len(struct nvmet_req *req) +{ + return le32_to_cpu(req->cmd->auth_receive.al); +} + void nvmet_execute_auth_receive(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -454,7 +464,7 @@ void nvmet_execute_auth_receive(struct nvmet_req *req) offsetof(struct nvmf_auth_receive_command, spsp1); goto done; } - al = le32_to_cpu(req->cmd->auth_receive.al); + al = nvmet_auth_receive_data_len(req); if (!al) { status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; req->error_loc = diff --git 
a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c index c49904ebb6c2..a7ff05b3be29 100644 --- a/drivers/nvme/target/fabrics-cmd.c +++ b/drivers/nvme/target/fabrics-cmd.c @@ -85,6 +85,22 @@ static void nvmet_execute_prop_get(struct nvmet_req *req) nvmet_req_complete(req, status); } +u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->fabrics.fctype) { +#ifdef CONFIG_NVME_TARGET_AUTH + case nvme_fabrics_type_auth_send: + return nvmet_auth_send_data_len(req); + case nvme_fabrics_type_auth_receive: + return nvmet_auth_receive_data_len(req); +#endif + default: + return 0; + } +} + u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -114,6 +130,22 @@ u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req) return 0; } +u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + switch (cmd->fabrics.fctype) { +#ifdef CONFIG_NVME_TARGET_AUTH + case nvme_fabrics_type_auth_send: + return nvmet_auth_send_data_len(req); + case nvme_fabrics_type_auth_receive: + return nvmet_auth_receive_data_len(req); +#endif + default: + return 0; + } +} + u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -213,73 +245,67 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req) struct nvmf_connect_command *c = &req->cmd->connect; struct nvmf_connect_data *d; struct nvmet_ctrl *ctrl = NULL; - u16 status; - u8 dhchap_status; + struct nvmet_alloc_ctrl_args args = { + .port = req->port, + .ops = req->ops, + .p2p_client = req->p2p_client, + .kato = le32_to_cpu(c->kato), + }; if (!nvmet_check_transfer_len(req, sizeof(struct nvmf_connect_data))) return; d = kmalloc(sizeof(*d), GFP_KERNEL); if (!d) { - status = NVME_SC_INTERNAL; + args.status = NVME_SC_INTERNAL; goto complete; } - status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); - if (status) + args.status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d)); + if (args.status) goto out; if (c->recfmt != 0) { pr_warn("invalid connect version (%d).\n", le16_to_cpu(c->recfmt)); - req->error_loc = offsetof(struct nvmf_connect_command, recfmt); - status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; + args.error_loc = offsetof(struct nvmf_connect_command, recfmt); + args.status = NVME_SC_CONNECT_FORMAT | NVME_STATUS_DNR; goto out; } if (unlikely(d->cntlid != cpu_to_le16(0xffff))) { pr_warn("connect attempt for invalid controller ID %#x\n", d->cntlid); - status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; - req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid); + args.status = NVME_SC_CONNECT_INVALID_PARAM | NVME_STATUS_DNR; + args.result = IPO_IATTR_CONNECT_DATA(cntlid); goto out; } d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0'; - status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req, - le32_to_cpu(c->kato), &ctrl, &d->hostid); - if (status) - goto out; - dhchap_status = nvmet_setup_auth(ctrl); - if (dhchap_status) { - pr_err("Failed to setup authentication, dhchap status %u\n", - dhchap_status); - nvmet_ctrl_put(ctrl); - if (dhchap_status == NVME_AUTH_DHCHAP_FAILURE_FAILED) - status = (NVME_SC_CONNECT_INVALID_HOST | NVME_STATUS_DNR); - else - status = NVME_SC_INTERNAL; + args.subsysnqn = d->subsysnqn; + args.hostnqn = d->hostnqn; + args.hostid = &d->hostid; + args.kato = c->kato; + + ctrl = nvmet_alloc_ctrl(&args); + if (!ctrl) goto out; - } - status = nvmet_install_queue(ctrl, req); - if (status) { + 
args.status = nvmet_install_queue(ctrl, req); + if (args.status) { nvmet_ctrl_put(ctrl); goto out; } - pr_info("creating %s controller %d for subsystem %s for NQN %s%s%s.\n", - nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm", - ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn, - ctrl->pi_support ? " T10-PI is enabled" : "", - nvmet_has_auth(ctrl) ? " with DH-HMAC-CHAP" : ""); - req->cqe->result.u32 = cpu_to_le32(nvmet_connect_result(ctrl)); + args.result = cpu_to_le32(nvmet_connect_result(ctrl)); out: kfree(d); complete: - nvmet_req_complete(req, status); + req->error_loc = args.error_loc; + req->cqe->result.u32 = args.result; + nvmet_req_complete(req, args.status); } static void nvmet_execute_io_connect(struct nvmet_req *req) @@ -343,6 +369,17 @@ out_ctrl_put: goto out; } +u32 nvmet_connect_cmd_data_len(struct nvmet_req *req) +{ + struct nvme_command *cmd = req->cmd; + + if (!nvme_is_fabrics(cmd) || + cmd->fabrics.fctype != nvme_fabrics_type_connect) + return 0; + + return sizeof(struct nvmf_connect_data); +} + u16 nvmet_parse_connect_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 0bda83d0fc3e..6380b60fd490 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -272,6 +272,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req) iter_flags = SG_MITER_FROM_SG; } + if (req->cmd->rw.control & NVME_RW_LR) + opf |= REQ_FAILFAST_DEV; + if (is_pci_p2pdma_page(sg_page(req->sg))) opf |= REQ_NOMERGE; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 58328b35dc96..f4df458df9db 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -238,6 +238,8 @@ struct nvmet_ctrl { struct nvmet_subsys *subsys; struct nvmet_sq **sqs; + void *drvdata; + bool reset_tbkas; struct mutex lock; @@ -324,6 +326,8 @@ struct nvmet_subsys { struct config_group namespaces_group; struct config_group allowed_hosts_group; + u16 vendor_id; + u16 subsys_vendor_id; char *model_number; u32 ieee_oui; char *firmware_rev; @@ -404,6 +408,18 @@ struct nvmet_fabrics_ops { void (*discovery_chg)(struct nvmet_port *port); u8 (*get_mdts)(const struct nvmet_ctrl *ctrl); u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl); + + /* Operations mandatory for PCI target controllers */ + u16 (*create_sq)(struct nvmet_ctrl *ctrl, u16 sqid, u16 flags, + u16 qsize, u64 prp1); + u16 (*delete_sq)(struct nvmet_ctrl *ctrl, u16 sqid); + u16 (*create_cq)(struct nvmet_ctrl *ctrl, u16 cqid, u16 flags, + u16 qsize, u64 prp1, u16 irq_vector); + u16 (*delete_cq)(struct nvmet_ctrl *ctrl, u16 cqid); + u16 (*set_feature)(const struct nvmet_ctrl *ctrl, u8 feat, + void *feat_data); + u16 (*get_feature)(const struct nvmet_ctrl *ctrl, u8 feat, + void *feat_data); }; #define NVMET_MAX_INLINE_BIOVEC 8 @@ -513,18 +529,24 @@ void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl); void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl); u16 nvmet_parse_connect_cmd(struct nvmet_req *req); +u32 nvmet_connect_cmd_data_len(struct nvmet_req *req); void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id); u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req); u16 nvmet_file_parse_io_cmd(struct nvmet_req *req); u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req); +u32 nvmet_admin_cmd_data_len(struct nvmet_req *req); u16 nvmet_parse_admin_cmd(struct nvmet_req *req); +u32 nvmet_discovery_cmd_data_len(struct nvmet_req *req); u16 
nvmet_parse_discovery_cmd(struct nvmet_req *req); u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req); +u32 nvmet_fabrics_admin_cmd_data_len(struct nvmet_req *req); u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req); +u32 nvmet_fabrics_io_cmd_data_len(struct nvmet_req *req); bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq, struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops); void nvmet_req_uninit(struct nvmet_req *req); +size_t nvmet_req_transfer_len(struct nvmet_req *req); bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len); bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len); void nvmet_req_complete(struct nvmet_req *req, u16 status); @@ -535,19 +557,37 @@ void nvmet_execute_set_features(struct nvmet_req *req); void nvmet_execute_get_features(struct nvmet_req *req); void nvmet_execute_keep_alive(struct nvmet_req *req); +u16 nvmet_check_cqid(struct nvmet_ctrl *ctrl, u16 cqid); void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, u16 size); +u16 nvmet_cq_create(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid, + u16 size); +u16 nvmet_check_sqid(struct nvmet_ctrl *ctrl, u16 sqid, bool create); void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, u16 size); +u16 nvmet_sq_create(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid, + u16 size); void nvmet_sq_destroy(struct nvmet_sq *sq); int nvmet_sq_init(struct nvmet_sq *sq); void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl); void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new); -u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, - struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp, - uuid_t *hostid); + +struct nvmet_alloc_ctrl_args { + struct nvmet_port *port; + char *subsysnqn; + char *hostnqn; + uuid_t *hostid; + const struct nvmet_fabrics_ops *ops; + struct device *p2p_client; + u32 kato; + u32 result; + u16 error_loc; + u16 status; +}; + +struct nvmet_ctrl *nvmet_alloc_ctrl(struct nvmet_alloc_ctrl_args *args); struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid, struct nvmet_req *req); @@ -689,6 +729,11 @@ static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys) return subsys->type != NVME_NQN_NVME; } +static inline bool nvmet_is_pci_ctrl(struct nvmet_ctrl *ctrl) +{ + return ctrl->port->disc_addr.trtype == NVMF_TRTYPE_PCI; +} + #ifdef CONFIG_NVME_TARGET_PASSTHRU void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys); int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys); @@ -730,6 +775,41 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl); u16 errno_to_nvme_status(struct nvmet_req *req, int errno); u16 nvmet_report_invalid_opcode(struct nvmet_req *req); +static inline bool nvmet_cc_en(u32 cc) +{ + return (cc >> NVME_CC_EN_SHIFT) & 0x1; +} + +static inline u8 nvmet_cc_css(u32 cc) +{ + return (cc >> NVME_CC_CSS_SHIFT) & 0x7; +} + +static inline u8 nvmet_cc_mps(u32 cc) +{ + return (cc >> NVME_CC_MPS_SHIFT) & 0xf; +} + +static inline u8 nvmet_cc_ams(u32 cc) +{ + return (cc >> NVME_CC_AMS_SHIFT) & 0x7; +} + +static inline u8 nvmet_cc_shn(u32 cc) +{ + return (cc >> NVME_CC_SHN_SHIFT) & 0x3; +} + +static inline u8 nvmet_cc_iosqes(u32 cc) +{ + return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf; +} + +static inline u8 nvmet_cc_iocqes(u32 cc) +{ + return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf; +} + /* Convert a 32-bit number to a 16-bit 0's based number */ static inline __le16 to0based(u32 a) { @@ -766,7 +846,9 @@ static inline void 
nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio) } #ifdef CONFIG_NVME_TARGET_AUTH +u32 nvmet_auth_send_data_len(struct nvmet_req *req); void nvmet_execute_auth_send(struct nvmet_req *req); +u32 nvmet_auth_receive_data_len(struct nvmet_req *req); void nvmet_execute_auth_receive(struct nvmet_req *req); int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, bool set_ctrl); @@ -824,4 +906,26 @@ static inline void nvmet_pr_put_ns_pc_ref(struct nvmet_pr_per_ctrl_ref *pc_ref) { percpu_ref_put(&pc_ref->ref); } + +/* + * Data for the get_feature() and set_feature() operations of PCI target + * controllers. + */ +struct nvmet_feat_irq_coalesce { + u8 thr; + u8 time; +}; + +struct nvmet_feat_irq_config { + u16 iv; + bool cd; +}; + +struct nvmet_feat_arbitration { + u8 hpw; + u8 mpw; + u8 lpw; + u8 ab; +}; + #endif /* _NVMET_H */ diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c new file mode 100644 index 000000000000..ac30b42cc622 --- /dev/null +++ b/drivers/nvme/target/pci-epf.c @@ -0,0 +1,2591 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMe PCI Endpoint Function target driver. + * + * Copyright (c) 2024, Western Digital Corporation or its affiliates. + * Copyright (c) 2024, Rick Wertenbroek <rick.wertenbroek@gmail.com> + * REDS Institute, HEIG-VD, HES-SO, Switzerland + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/io.h> +#include <linux/mempool.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvme.h> +#include <linux/pci_ids.h> +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> +#include <linux/pci_regs.h> +#include <linux/slab.h> + +#include "nvmet.h" + +static LIST_HEAD(nvmet_pci_epf_ports); +static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex); + +/* + * Default and maximum allowed data transfer size. For the default, + * allow up to 128 page-sized segments. For the maximum allowed, + * use 4 times the default (which is completely arbitrary). + */ +#define NVMET_PCI_EPF_MAX_SEGS 128 +#define NVMET_PCI_EPF_MDTS_KB \ + (NVMET_PCI_EPF_MAX_SEGS << (PAGE_SHIFT - 10)) +#define NVMET_PCI_EPF_MAX_MDTS_KB (NVMET_PCI_EPF_MDTS_KB * 4) + +/* + * IRQ vector coalescing threshold: by default, post 8 CQEs before raising an + * interrupt vector to the host. This default 8 is completely arbitrary and can + * be changed by the host with a nvme_set_features command. + */ +#define NVMET_PCI_EPF_IV_THRESHOLD 8 + +/* + * BAR CC register and SQ polling intervals. + */ +#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(5) +#define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5) +#define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000) + +/* + * SQ arbitration burst default: fetch at most 8 commands at a time from an SQ. + */ +#define NVMET_PCI_EPF_SQ_AB 8 + +/* + * Handling of CQs is normally immediate, unless we fail to map a CQ or the CQ + * is full, in which case we retry the CQ processing after this interval. + */ +#define NVMET_PCI_EPF_CQ_RETRY_INTERVAL msecs_to_jiffies(1) + +enum nvmet_pci_epf_queue_flags { + NVMET_PCI_EPF_Q_IS_SQ = 0, /* The queue is a submission queue */ + NVMET_PCI_EPF_Q_LIVE, /* The queue is live */ + NVMET_PCI_EPF_Q_IRQ_ENABLED, /* IRQ is enabled for this queue */ +}; + +/* + * IRQ vector descriptor. 
+ */ +struct nvmet_pci_epf_irq_vector { + unsigned int vector; + unsigned int ref; + bool cd; + int nr_irqs; +}; + +struct nvmet_pci_epf_queue { + union { + struct nvmet_sq nvme_sq; + struct nvmet_cq nvme_cq; + }; + struct nvmet_pci_epf_ctrl *ctrl; + unsigned long flags; + + u64 pci_addr; + size_t pci_size; + struct pci_epc_map pci_map; + + u16 qid; + u16 depth; + u16 vector; + u16 head; + u16 tail; + u16 phase; + u32 db; + + size_t qes; + + struct nvmet_pci_epf_irq_vector *iv; + struct workqueue_struct *iod_wq; + struct delayed_work work; + spinlock_t lock; + struct list_head list; +}; + +/* + * PCI Root Complex (RC) address data segment for mapping an admin or + * I/O command buffer @buf of @length bytes to the PCI address @pci_addr. + */ +struct nvmet_pci_epf_segment { + void *buf; + u64 pci_addr; + u32 length; +}; + +/* + * Command descriptors. + */ +struct nvmet_pci_epf_iod { + struct list_head link; + + struct nvmet_req req; + struct nvme_command cmd; + struct nvme_completion cqe; + unsigned int status; + + struct nvmet_pci_epf_ctrl *ctrl; + + struct nvmet_pci_epf_queue *sq; + struct nvmet_pci_epf_queue *cq; + + /* Data transfer size and direction for the command. */ + size_t data_len; + enum dma_data_direction dma_dir; + + /* + * PCI Root Complex (RC) address data segments: if nr_data_segs is 1, we + * use only @data_seg. Otherwise, the array of segments @data_segs is + * allocated to manage multiple PCI address data segments. @data_sgl and + * @data_sgt are used to setup the command request for execution by the + * target core. + */ + unsigned int nr_data_segs; + struct nvmet_pci_epf_segment data_seg; + struct nvmet_pci_epf_segment *data_segs; + struct scatterlist data_sgl; + struct sg_table data_sgt; + + struct work_struct work; + struct completion done; +}; + +/* + * PCI target controller private data. + */ +struct nvmet_pci_epf_ctrl { + struct nvmet_pci_epf *nvme_epf; + struct nvmet_port *port; + struct nvmet_ctrl *tctrl; + struct device *dev; + + unsigned int nr_queues; + struct nvmet_pci_epf_queue *sq; + struct nvmet_pci_epf_queue *cq; + unsigned int sq_ab; + + mempool_t iod_pool; + void *bar; + u64 cap; + u32 cc; + u32 csts; + + size_t io_sqes; + size_t io_cqes; + + size_t mps_shift; + size_t mps; + size_t mps_mask; + + unsigned int mdts; + + struct delayed_work poll_cc; + struct delayed_work poll_sqs; + + struct mutex irq_lock; + struct nvmet_pci_epf_irq_vector *irq_vectors; + unsigned int irq_vector_threshold; + + bool link_up; + bool enabled; +}; + +/* + * PCI EPF driver private data. + */ +struct nvmet_pci_epf { + struct pci_epf *epf; + + const struct pci_epc_features *epc_features; + + void *reg_bar; + size_t msix_table_offset; + + unsigned int irq_type; + unsigned int nr_vectors; + + struct nvmet_pci_epf_ctrl ctrl; + + bool dma_enabled; + struct dma_chan *dma_tx_chan; + struct mutex dma_tx_lock; + struct dma_chan *dma_rx_chan; + struct mutex dma_rx_lock; + + struct mutex mmio_lock; + + /* PCI endpoint function configfs attributes. 
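These are exposed through the function's "nvme" configfs group as portid, subsysnqn and mdts_kb, and can only be changed while the target controller has not been created yet.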
*/ + struct config_group group; + __le16 portid; + char subsysnqn[NVMF_NQN_SIZE]; + unsigned int mdts_kb; +}; + +static inline u32 nvmet_pci_epf_bar_read32(struct nvmet_pci_epf_ctrl *ctrl, + u32 off) +{ + __le32 *bar_reg = ctrl->bar + off; + + return le32_to_cpu(READ_ONCE(*bar_reg)); +} + +static inline void nvmet_pci_epf_bar_write32(struct nvmet_pci_epf_ctrl *ctrl, + u32 off, u32 val) +{ + __le32 *bar_reg = ctrl->bar + off; + + WRITE_ONCE(*bar_reg, cpu_to_le32(val)); +} + +static inline u64 nvmet_pci_epf_bar_read64(struct nvmet_pci_epf_ctrl *ctrl, + u32 off) +{ + return (u64)nvmet_pci_epf_bar_read32(ctrl, off) | + ((u64)nvmet_pci_epf_bar_read32(ctrl, off + 4) << 32); +} + +static inline void nvmet_pci_epf_bar_write64(struct nvmet_pci_epf_ctrl *ctrl, + u32 off, u64 val) +{ + nvmet_pci_epf_bar_write32(ctrl, off, val & 0xFFFFFFFF); + nvmet_pci_epf_bar_write32(ctrl, off + 4, (val >> 32) & 0xFFFFFFFF); +} + +static inline int nvmet_pci_epf_mem_map(struct nvmet_pci_epf *nvme_epf, + u64 pci_addr, size_t size, struct pci_epc_map *map) +{ + struct pci_epf *epf = nvme_epf->epf; + + return pci_epc_mem_map(epf->epc, epf->func_no, epf->vfunc_no, + pci_addr, size, map); +} + +static inline void nvmet_pci_epf_mem_unmap(struct nvmet_pci_epf *nvme_epf, + struct pci_epc_map *map) +{ + struct pci_epf *epf = nvme_epf->epf; + + pci_epc_mem_unmap(epf->epc, epf->func_no, epf->vfunc_no, map); +} + +struct nvmet_pci_epf_dma_filter { + struct device *dev; + u32 dma_mask; +}; + +static bool nvmet_pci_epf_dma_filter(struct dma_chan *chan, void *arg) +{ + struct nvmet_pci_epf_dma_filter *filter = arg; + struct dma_slave_caps caps; + + memset(&caps, 0, sizeof(caps)); + dma_get_slave_caps(chan, &caps); + + return chan->device->dev == filter->dev && + (filter->dma_mask & caps.directions); +} + +static void nvmet_pci_epf_init_dma(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + struct device *dev = &epf->dev; + struct nvmet_pci_epf_dma_filter filter; + struct dma_chan *chan; + dma_cap_mask_t mask; + + mutex_init(&nvme_epf->dma_rx_lock); + mutex_init(&nvme_epf->dma_tx_lock); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + filter.dev = epf->epc->dev.parent; + filter.dma_mask = BIT(DMA_DEV_TO_MEM); + + chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); + if (!chan) + goto out_dma_no_rx; + + nvme_epf->dma_rx_chan = chan; + + filter.dma_mask = BIT(DMA_MEM_TO_DEV); + chan = dma_request_channel(mask, nvmet_pci_epf_dma_filter, &filter); + if (!chan) + goto out_dma_no_tx; + + nvme_epf->dma_tx_chan = chan; + + nvme_epf->dma_enabled = true; + + dev_dbg(dev, "Using DMA RX channel %s, maximum segment size %u B\n", + dma_chan_name(chan), + dma_get_max_seg_size(dmaengine_get_dma_device(chan))); + + dev_dbg(dev, "Using DMA TX channel %s, maximum segment size %u B\n", + dma_chan_name(chan), + dma_get_max_seg_size(dmaengine_get_dma_device(chan))); + + return; + +out_dma_no_tx: + dma_release_channel(nvme_epf->dma_rx_chan); + nvme_epf->dma_rx_chan = NULL; + +out_dma_no_rx: + mutex_destroy(&nvme_epf->dma_rx_lock); + mutex_destroy(&nvme_epf->dma_tx_lock); + nvme_epf->dma_enabled = false; + + dev_info(&epf->dev, "DMA not supported, falling back to MMIO\n"); +} + +static void nvmet_pci_epf_deinit_dma(struct nvmet_pci_epf *nvme_epf) +{ + if (!nvme_epf->dma_enabled) + return; + + dma_release_channel(nvme_epf->dma_tx_chan); + nvme_epf->dma_tx_chan = NULL; + dma_release_channel(nvme_epf->dma_rx_chan); + nvme_epf->dma_rx_chan = NULL; + mutex_destroy(&nvme_epf->dma_rx_lock); + 
mutex_destroy(&nvme_epf->dma_tx_lock); + nvme_epf->dma_enabled = false; +} + +static int nvmet_pci_epf_dma_transfer(struct nvmet_pci_epf *nvme_epf, + struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) +{ + struct pci_epf *epf = nvme_epf->epf; + struct dma_async_tx_descriptor *desc; + struct dma_slave_config sconf = {}; + struct device *dev = &epf->dev; + struct device *dma_dev; + struct dma_chan *chan; + dma_cookie_t cookie; + dma_addr_t dma_addr; + struct mutex *lock; + int ret; + + switch (dir) { + case DMA_FROM_DEVICE: + lock = &nvme_epf->dma_rx_lock; + chan = nvme_epf->dma_rx_chan; + sconf.direction = DMA_DEV_TO_MEM; + sconf.src_addr = seg->pci_addr; + break; + case DMA_TO_DEVICE: + lock = &nvme_epf->dma_tx_lock; + chan = nvme_epf->dma_tx_chan; + sconf.direction = DMA_MEM_TO_DEV; + sconf.dst_addr = seg->pci_addr; + break; + default: + return -EINVAL; + } + + mutex_lock(lock); + + dma_dev = dmaengine_get_dma_device(chan); + dma_addr = dma_map_single(dma_dev, seg->buf, seg->length, dir); + ret = dma_mapping_error(dma_dev, dma_addr); + if (ret) + goto unlock; + + ret = dmaengine_slave_config(chan, &sconf); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto unmap; + } + + desc = dmaengine_prep_slave_single(chan, dma_addr, seg->length, + sconf.direction, DMA_CTRL_ACK); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto unmap; + } + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(dev, "Failed to do DMA submit (err=%d)\n", ret); + goto unmap; + } + + if (dma_sync_wait(chan, cookie) != DMA_COMPLETE) { + dev_err(dev, "DMA transfer failed\n"); + ret = -EIO; + } + + dmaengine_terminate_sync(chan); + +unmap: + dma_unmap_single(dma_dev, dma_addr, seg->length, dir); + +unlock: + mutex_unlock(lock); + + return ret; +} + +static int nvmet_pci_epf_mmio_transfer(struct nvmet_pci_epf *nvme_epf, + struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) +{ + u64 pci_addr = seg->pci_addr; + u32 length = seg->length; + void *buf = seg->buf; + struct pci_epc_map map; + int ret = -EINVAL; + + /* + * Note: MMIO transfers do not need serialization but this is a + * simple way to avoid using too many mapping windows. 
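+ * Also note that pci_epc_mem_map() may map less than the requested length,
+ * so the copy loop advances by map.pci_size until the whole buffer has been
+ * transferred.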
+ */ + mutex_lock(&nvme_epf->mmio_lock); + + while (length) { + ret = nvmet_pci_epf_mem_map(nvme_epf, pci_addr, length, &map); + if (ret) + break; + + switch (dir) { + case DMA_FROM_DEVICE: + memcpy_fromio(buf, map.virt_addr, map.pci_size); + break; + case DMA_TO_DEVICE: + memcpy_toio(map.virt_addr, buf, map.pci_size); + break; + default: + ret = -EINVAL; + goto unlock; + } + + pci_addr += map.pci_size; + buf += map.pci_size; + length -= map.pci_size; + + nvmet_pci_epf_mem_unmap(nvme_epf, &map); + } + +unlock: + mutex_unlock(&nvme_epf->mmio_lock); + + return ret; +} + +static inline int nvmet_pci_epf_transfer_seg(struct nvmet_pci_epf *nvme_epf, + struct nvmet_pci_epf_segment *seg, enum dma_data_direction dir) +{ + if (nvme_epf->dma_enabled) + return nvmet_pci_epf_dma_transfer(nvme_epf, seg, dir); + + return nvmet_pci_epf_mmio_transfer(nvme_epf, seg, dir); +} + +static inline int nvmet_pci_epf_transfer(struct nvmet_pci_epf_ctrl *ctrl, + void *buf, u64 pci_addr, u32 length, + enum dma_data_direction dir) +{ + struct nvmet_pci_epf_segment seg = { + .buf = buf, + .pci_addr = pci_addr, + .length = length, + }; + + return nvmet_pci_epf_transfer_seg(ctrl->nvme_epf, &seg, dir); +} + +static int nvmet_pci_epf_alloc_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) +{ + ctrl->irq_vectors = kcalloc(ctrl->nr_queues, + sizeof(struct nvmet_pci_epf_irq_vector), + GFP_KERNEL); + if (!ctrl->irq_vectors) + return -ENOMEM; + + mutex_init(&ctrl->irq_lock); + + return 0; +} + +static void nvmet_pci_epf_free_irq_vectors(struct nvmet_pci_epf_ctrl *ctrl) +{ + if (ctrl->irq_vectors) { + mutex_destroy(&ctrl->irq_lock); + kfree(ctrl->irq_vectors); + ctrl->irq_vectors = NULL; + } +} + +static struct nvmet_pci_epf_irq_vector * +nvmet_pci_epf_find_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) +{ + struct nvmet_pci_epf_irq_vector *iv; + int i; + + lockdep_assert_held(&ctrl->irq_lock); + + for (i = 0; i < ctrl->nr_queues; i++) { + iv = &ctrl->irq_vectors[i]; + if (iv->ref && iv->vector == vector) + return iv; + } + + return NULL; +} + +static struct nvmet_pci_epf_irq_vector * +nvmet_pci_epf_add_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, u16 vector) +{ + struct nvmet_pci_epf_irq_vector *iv; + int i; + + mutex_lock(&ctrl->irq_lock); + + iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); + if (iv) { + iv->ref++; + goto unlock; + } + + for (i = 0; i < ctrl->nr_queues; i++) { + iv = &ctrl->irq_vectors[i]; + if (!iv->ref) + break; + } + + if (WARN_ON_ONCE(!iv)) + goto unlock; + + iv->ref = 1; + iv->vector = vector; + iv->nr_irqs = 0; + +unlock: + mutex_unlock(&ctrl->irq_lock); + + return iv; +} + +static void nvmet_pci_epf_remove_irq_vector(struct nvmet_pci_epf_ctrl *ctrl, + u16 vector) +{ + struct nvmet_pci_epf_irq_vector *iv; + + mutex_lock(&ctrl->irq_lock); + + iv = nvmet_pci_epf_find_irq_vector(ctrl, vector); + if (iv) { + iv->ref--; + if (!iv->ref) { + iv->vector = 0; + iv->nr_irqs = 0; + } + } + + mutex_unlock(&ctrl->irq_lock); +} + +static bool nvmet_pci_epf_should_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *cq, bool force) +{ + struct nvmet_pci_epf_irq_vector *iv = cq->iv; + bool ret; + + if (!test_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags)) + return false; + + /* IRQ coalescing for the admin queue is not allowed. 
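Coalescing applies only to I/O CQs: unless the host disabled it for this vector, an interrupt is raised once irq_vector_threshold completion entries (8 by default) have been posted, or when forced at the end of a CQ processing run.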
*/ + if (!cq->qid) + return true; + + if (iv->cd) + return true; + + if (force) { + ret = iv->nr_irqs > 0; + } else { + iv->nr_irqs++; + ret = iv->nr_irqs >= ctrl->irq_vector_threshold; + } + if (ret) + iv->nr_irqs = 0; + + return ret; +} + +static void nvmet_pci_epf_raise_irq(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *cq, bool force) +{ + struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; + struct pci_epf *epf = nvme_epf->epf; + int ret = 0; + + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + return; + + mutex_lock(&ctrl->irq_lock); + + if (!nvmet_pci_epf_should_raise_irq(ctrl, cq, force)) + goto unlock; + + switch (nvme_epf->irq_type) { + case PCI_IRQ_MSIX: + case PCI_IRQ_MSI: + ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, + nvme_epf->irq_type, cq->vector + 1); + if (!ret) + break; + /* + * If we got an error, it is likely because the host is using + * legacy IRQs (e.g. BIOS, grub). + */ + fallthrough; + case PCI_IRQ_INTX: + ret = pci_epc_raise_irq(epf->epc, epf->func_no, epf->vfunc_no, + PCI_IRQ_INTX, 0); + break; + default: + WARN_ON_ONCE(1); + ret = -EINVAL; + break; + } + + if (ret) + dev_err(ctrl->dev, "Failed to raise IRQ (err=%d)\n", ret); + +unlock: + mutex_unlock(&ctrl->irq_lock); +} + +static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod) +{ + return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode); +} + +static void nvmet_pci_epf_exec_iod_work(struct work_struct *work); + +static struct nvmet_pci_epf_iod * +nvmet_pci_epf_alloc_iod(struct nvmet_pci_epf_queue *sq) +{ + struct nvmet_pci_epf_ctrl *ctrl = sq->ctrl; + struct nvmet_pci_epf_iod *iod; + + iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL); + if (unlikely(!iod)) + return NULL; + + memset(iod, 0, sizeof(*iod)); + iod->req.cmd = &iod->cmd; + iod->req.cqe = &iod->cqe; + iod->req.port = ctrl->port; + iod->ctrl = ctrl; + iod->sq = sq; + iod->cq = &ctrl->cq[sq->qid]; + INIT_LIST_HEAD(&iod->link); + iod->dma_dir = DMA_NONE; + INIT_WORK(&iod->work, nvmet_pci_epf_exec_iod_work); + init_completion(&iod->done); + + return iod; +} + +/* + * Allocate or grow a command table of PCI segments. + */ +static int nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod, + int nsegs) +{ + struct nvmet_pci_epf_segment *segs; + int nr_segs = iod->nr_data_segs + nsegs; + + segs = krealloc(iod->data_segs, + nr_segs * sizeof(struct nvmet_pci_epf_segment), + GFP_KERNEL | __GFP_ZERO); + if (!segs) + return -ENOMEM; + + iod->nr_data_segs = nr_segs; + iod->data_segs = segs; + + return 0; +} + +static void nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod) +{ + int i; + + if (iod->data_segs) { + for (i = 0; i < iod->nr_data_segs; i++) + kfree(iod->data_segs[i].buf); + if (iod->data_segs != &iod->data_seg) + kfree(iod->data_segs); + } + if (iod->data_sgt.nents > 1) + sg_free_table(&iod->data_sgt); + mempool_free(iod, &iod->ctrl->iod_pool); +} + +static int nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf *nvme_epf = iod->ctrl->nvme_epf; + struct nvmet_pci_epf_segment *seg = &iod->data_segs[0]; + int i, ret; + + /* Split the data transfer according to the PCI segments. 
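Each segment is transferred independently, using the DMA channels when available and MMIO copies otherwise; any failure completes the command with a Data Transfer Error status.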
*/ + for (i = 0; i < iod->nr_data_segs; i++, seg++) { + ret = nvmet_pci_epf_transfer_seg(nvme_epf, seg, iod->dma_dir); + if (ret) { + iod->status = NVME_SC_DATA_XFER_ERROR | NVME_STATUS_DNR; + return ret; + } + } + + return 0; +} + +static inline u32 nvmet_pci_epf_prp_ofst(struct nvmet_pci_epf_ctrl *ctrl, + u64 prp) +{ + return prp & ctrl->mps_mask; +} + +static inline size_t nvmet_pci_epf_prp_size(struct nvmet_pci_epf_ctrl *ctrl, + u64 prp) +{ + return ctrl->mps - nvmet_pci_epf_prp_ofst(ctrl, prp); +} + +/* + * Transfer a PRP list from the host and return the number of prps. + */ +static int nvmet_pci_epf_get_prp_list(struct nvmet_pci_epf_ctrl *ctrl, u64 prp, + size_t xfer_len, __le64 *prps) +{ + size_t nr_prps = (xfer_len + ctrl->mps_mask) >> ctrl->mps_shift; + u32 length; + int ret; + + /* + * Compute the number of PRPs required for the number of bytes to + * transfer (xfer_len). If this number overflows the memory page size + * with the PRP list pointer specified, only return the space available + * in the memory page, the last PRP in there will be a PRP list pointer + * to the remaining PRPs. + */ + length = min(nvmet_pci_epf_prp_size(ctrl, prp), nr_prps << 3); + ret = nvmet_pci_epf_transfer(ctrl, prps, prp, length, DMA_FROM_DEVICE); + if (ret) + return ret; + + return length >> 3; +} + +static int nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_iod *iod) +{ + struct nvme_command *cmd = &iod->cmd; + struct nvmet_pci_epf_segment *seg; + size_t size = 0, ofst, prp_size, xfer_len; + size_t transfer_len = iod->data_len; + int nr_segs, nr_prps = 0; + u64 pci_addr, prp; + int i = 0, ret; + __le64 *prps; + + prps = kzalloc(ctrl->mps, GFP_KERNEL); + if (!prps) + goto err_internal; + + /* + * Allocate PCI segments for the command: this considers the worst case + * scenario where all prps are discontiguous, so get as many segments + * as we can have prps. In practice, most of the time, we will have + * far less PCI segments than prps. + */ + prp = le64_to_cpu(cmd->common.dptr.prp1); + if (!prp) + goto err_invalid_field; + + ofst = nvmet_pci_epf_prp_ofst(ctrl, prp); + nr_segs = (transfer_len + ofst + ctrl->mps - 1) >> ctrl->mps_shift; + + ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); + if (ret) + goto err_internal; + + /* Set the first segment using prp1. */ + seg = &iod->data_segs[0]; + seg->pci_addr = prp; + seg->length = nvmet_pci_epf_prp_size(ctrl, prp); + + size = seg->length; + pci_addr = prp + size; + nr_segs = 1; + + /* + * Now build the PCI address segments using the PRP lists, starting + * from prp2. + */ + prp = le64_to_cpu(cmd->common.dptr.prp2); + if (!prp) + goto err_invalid_field; + + while (size < transfer_len) { + xfer_len = transfer_len - size; + + if (!nr_prps) { + nr_prps = nvmet_pci_epf_get_prp_list(ctrl, prp, + xfer_len, prps); + if (nr_prps < 0) + goto err_internal; + + i = 0; + ofst = 0; + } + + /* Current entry */ + prp = le64_to_cpu(prps[i]); + if (!prp) + goto err_invalid_field; + + /* Did we reach the last PRP entry of the list? */ + if (xfer_len > ctrl->mps && i == nr_prps - 1) { + /* We need more PRPs: PRP is a list pointer. */ + nr_prps = 0; + continue; + } + + /* Only the first PRP is allowed to have an offset. */ + if (nvmet_pci_epf_prp_ofst(ctrl, prp)) + goto err_invalid_offset; + + if (prp != pci_addr) { + /* Discontiguous prp: new segment. 
*/ + nr_segs++; + if (WARN_ON_ONCE(nr_segs > iod->nr_data_segs)) + goto err_internal; + + seg++; + seg->pci_addr = prp; + seg->length = 0; + pci_addr = prp; + } + + prp_size = min_t(size_t, ctrl->mps, xfer_len); + seg->length += prp_size; + pci_addr += prp_size; + size += prp_size; + + i++; + } + + iod->nr_data_segs = nr_segs; + ret = 0; + + if (size != transfer_len) { + dev_err(ctrl->dev, + "PRPs transfer length mismatch: got %zu B, need %zu B\n", + size, transfer_len); + goto err_internal; + } + + kfree(prps); + + return 0; + +err_invalid_offset: + dev_err(ctrl->dev, "PRPs list invalid offset\n"); + iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; + goto err; + +err_invalid_field: + dev_err(ctrl->dev, "PRPs list invalid field\n"); + iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + goto err; + +err_internal: + dev_err(ctrl->dev, "PRPs list internal error\n"); + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + +err: + kfree(prps); + return -EINVAL; +} + +static int nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_iod *iod) +{ + struct nvme_command *cmd = &iod->cmd; + size_t transfer_len = iod->data_len; + int ret, nr_segs = 1; + u64 prp1, prp2 = 0; + size_t prp1_size; + + prp1 = le64_to_cpu(cmd->common.dptr.prp1); + prp1_size = nvmet_pci_epf_prp_size(ctrl, prp1); + + /* For commands crossing a page boundary, we should have prp2. */ + if (transfer_len > prp1_size) { + prp2 = le64_to_cpu(cmd->common.dptr.prp2); + if (!prp2) { + iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + return -EINVAL; + } + if (nvmet_pci_epf_prp_ofst(ctrl, prp2)) { + iod->status = + NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; + return -EINVAL; + } + if (prp2 != prp1 + prp1_size) + nr_segs = 2; + } + + if (nr_segs == 1) { + iod->nr_data_segs = 1; + iod->data_segs = &iod->data_seg; + iod->data_segs[0].pci_addr = prp1; + iod->data_segs[0].length = transfer_len; + return 0; + } + + ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_segs); + if (ret) { + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + return ret; + } + + iod->data_segs[0].pci_addr = prp1; + iod->data_segs[0].length = prp1_size; + iod->data_segs[1].pci_addr = prp2; + iod->data_segs[1].length = transfer_len - prp1_size; + + return 0; +} + +static int nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; + u64 prp1 = le64_to_cpu(iod->cmd.common.dptr.prp1); + size_t ofst; + + /* Get the PCI address segments for the command using its PRPs. */ + ofst = nvmet_pci_epf_prp_ofst(ctrl, prp1); + if (ofst & 0x3) { + iod->status = NVME_SC_PRP_INVALID_OFFSET | NVME_STATUS_DNR; + return -EINVAL; + } + + if (iod->data_len + ofst <= ctrl->mps * 2) + return nvmet_pci_epf_iod_parse_prp_simple(ctrl, iod); + + return nvmet_pci_epf_iod_parse_prp_list(ctrl, iod); +} + +/* + * Transfer an SGL segment from the host and return the number of data + * descriptors and the next segment descriptor, if any. 
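+ * If the last descriptor of the transferred segment is itself a (last)
+ * segment descriptor, it is copied back into the caller's descriptor and
+ * excluded from the returned count; otherwise the descriptor length is
+ * cleared to mark the end of the chain.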
+ */ +static struct nvme_sgl_desc * +nvmet_pci_epf_get_sgl_segment(struct nvmet_pci_epf_ctrl *ctrl, + struct nvme_sgl_desc *desc, unsigned int *nr_sgls) +{ + struct nvme_sgl_desc *sgls; + u32 length = le32_to_cpu(desc->length); + int nr_descs, ret; + void *buf; + + buf = kmalloc(length, GFP_KERNEL); + if (!buf) + return NULL; + + ret = nvmet_pci_epf_transfer(ctrl, buf, le64_to_cpu(desc->addr), length, + DMA_FROM_DEVICE); + if (ret) { + kfree(buf); + return NULL; + } + + sgls = buf; + nr_descs = length / sizeof(struct nvme_sgl_desc); + if (sgls[nr_descs - 1].type == (NVME_SGL_FMT_SEG_DESC << 4) || + sgls[nr_descs - 1].type == (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { + /* + * We have another SGL segment following this one: do not count + * it as a regular data SGL descriptor and return it to the + * caller. + */ + *desc = sgls[nr_descs - 1]; + nr_descs--; + } else { + /* We do not have another SGL segment after this one. */ + desc->length = 0; + } + + *nr_sgls = nr_descs; + + return sgls; +} + +static int nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_iod *iod) +{ + struct nvme_command *cmd = &iod->cmd; + struct nvme_sgl_desc seg = cmd->common.dptr.sgl; + struct nvme_sgl_desc *sgls = NULL; + int n = 0, i, nr_sgls; + int ret; + + /* + * We do not support inline data nor keyed SGLs, so we should be seeing + * only segment descriptors. + */ + if (seg.type != (NVME_SGL_FMT_SEG_DESC << 4) && + seg.type != (NVME_SGL_FMT_LAST_SEG_DESC << 4)) { + iod->status = NVME_SC_SGL_INVALID_TYPE | NVME_STATUS_DNR; + return -EIO; + } + + while (seg.length) { + sgls = nvmet_pci_epf_get_sgl_segment(ctrl, &seg, &nr_sgls); + if (!sgls) { + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + return -EIO; + } + + /* Grow the PCI segment table as needed. */ + ret = nvmet_pci_epf_alloc_iod_data_segs(iod, nr_sgls); + if (ret) { + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + goto out; + } + + /* + * Parse the SGL descriptors to build the PCI segment table, + * checking the descriptor type as we go. + */ + for (i = 0; i < nr_sgls; i++) { + if (sgls[i].type != (NVME_SGL_FMT_DATA_DESC << 4)) { + iod->status = NVME_SC_SGL_INVALID_TYPE | + NVME_STATUS_DNR; + goto out; + } + iod->data_segs[n].pci_addr = le64_to_cpu(sgls[i].addr); + iod->data_segs[n].length = le32_to_cpu(sgls[i].length); + n++; + } + + kfree(sgls); + } + + out: + if (iod->status != NVME_SC_SUCCESS) { + kfree(sgls); + return -EIO; + } + + return 0; +} + +static int nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; + struct nvme_sgl_desc *sgl = &iod->cmd.common.dptr.sgl; + + if (sgl->type == (NVME_SGL_FMT_DATA_DESC << 4)) { + /* Single data descriptor case. */ + iod->nr_data_segs = 1; + iod->data_segs = &iod->data_seg; + iod->data_seg.pci_addr = le64_to_cpu(sgl->addr); + iod->data_seg.length = le32_to_cpu(sgl->length); + return 0; + } + + return nvmet_pci_epf_iod_parse_sgl_segments(ctrl, iod); +} + +static int nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_ctrl *ctrl = iod->ctrl; + struct nvmet_req *req = &iod->req; + struct nvmet_pci_epf_segment *seg; + struct scatterlist *sg; + int ret, i; + + if (iod->data_len > ctrl->mdts) { + iod->status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + return -EINVAL; + } + + /* + * Get the PCI address segments for the command data buffer using either + * its SGLs or PRPs. 
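+ * The NVME_CMD_SGL_ALL flag of the command selects which format is used.
+ * In both cases, the data is staged in locally allocated buffers that are
+ * mapped into a scatterlist for execution by the target core.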
+ */ + if (iod->cmd.common.flags & NVME_CMD_SGL_ALL) + ret = nvmet_pci_epf_iod_parse_sgls(iod); + else + ret = nvmet_pci_epf_iod_parse_prps(iod); + if (ret) + return ret; + + /* Get a command buffer using SGLs matching the PCI segments. */ + if (iod->nr_data_segs == 1) { + sg_init_table(&iod->data_sgl, 1); + iod->data_sgt.sgl = &iod->data_sgl; + iod->data_sgt.nents = 1; + iod->data_sgt.orig_nents = 1; + } else { + ret = sg_alloc_table(&iod->data_sgt, iod->nr_data_segs, + GFP_KERNEL); + if (ret) + goto err_nomem; + } + + for_each_sgtable_sg(&iod->data_sgt, sg, i) { + seg = &iod->data_segs[i]; + seg->buf = kmalloc(seg->length, GFP_KERNEL); + if (!seg->buf) + goto err_nomem; + sg_set_buf(sg, seg->buf, seg->length); + } + + req->transfer_len = iod->data_len; + req->sg = iod->data_sgt.sgl; + req->sg_cnt = iod->data_sgt.nents; + + return 0; + +err_nomem: + iod->status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + return -ENOMEM; +} + +static void nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod) +{ + struct nvmet_pci_epf_queue *cq = iod->cq; + unsigned long flags; + + /* Print an error message for failed commands, except AENs. */ + iod->status = le16_to_cpu(iod->cqe.status) >> 1; + if (iod->status && iod->cmd.common.opcode != nvme_admin_async_event) + dev_err(iod->ctrl->dev, + "CQ[%d]: Command %s (0x%x) status 0x%0x\n", + iod->sq->qid, nvmet_pci_epf_iod_name(iod), + iod->cmd.common.opcode, iod->status); + + /* + * Add the command to the list of completed commands and schedule the + * CQ work. + */ + spin_lock_irqsave(&cq->lock, flags); + list_add_tail(&iod->link, &cq->list); + queue_delayed_work(system_highpri_wq, &cq->work, 0); + spin_unlock_irqrestore(&cq->lock, flags); +} + +static void nvmet_pci_epf_drain_queue(struct nvmet_pci_epf_queue *queue) +{ + struct nvmet_pci_epf_iod *iod; + unsigned long flags; + + spin_lock_irqsave(&queue->lock, flags); + while (!list_empty(&queue->list)) { + iod = list_first_entry(&queue->list, struct nvmet_pci_epf_iod, + link); + list_del_init(&iod->link); + nvmet_pci_epf_free_iod(iod); + } + spin_unlock_irqrestore(&queue->lock, flags); +} + +static int nvmet_pci_epf_add_port(struct nvmet_port *port) +{ + mutex_lock(&nvmet_pci_epf_ports_mutex); + list_add_tail(&port->entry, &nvmet_pci_epf_ports); + mutex_unlock(&nvmet_pci_epf_ports_mutex); + return 0; +} + +static void nvmet_pci_epf_remove_port(struct nvmet_port *port) +{ + mutex_lock(&nvmet_pci_epf_ports_mutex); + list_del_init(&port->entry); + mutex_unlock(&nvmet_pci_epf_ports_mutex); +} + +static struct nvmet_port * +nvmet_pci_epf_find_port(struct nvmet_pci_epf_ctrl *ctrl, __le16 portid) +{ + struct nvmet_port *p, *port = NULL; + + mutex_lock(&nvmet_pci_epf_ports_mutex); + list_for_each_entry(p, &nvmet_pci_epf_ports, entry) { + if (p->disc_addr.portid == portid) { + port = p; + break; + } + } + mutex_unlock(&nvmet_pci_epf_ports_mutex); + + return port; +} + +static void nvmet_pci_epf_queue_response(struct nvmet_req *req) +{ + struct nvmet_pci_epf_iod *iod = + container_of(req, struct nvmet_pci_epf_iod, req); + + iod->status = le16_to_cpu(req->cqe->status) >> 1; + + /* If we have no data to transfer, directly complete the command. 
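Otherwise, signal iod->done so that nvmet_pci_epf_exec_iod_work() transfers the data to the host before completing the command.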
*/ + if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) { + nvmet_pci_epf_complete_iod(iod); + return; + } + + complete(&iod->done); +} + +static u8 nvmet_pci_epf_get_mdts(const struct nvmet_ctrl *tctrl) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + int page_shift = NVME_CAP_MPSMIN(tctrl->cap) + 12; + + return ilog2(ctrl->mdts) - page_shift; +} + +static u16 nvmet_pci_epf_create_cq(struct nvmet_ctrl *tctrl, + u16 cqid, u16 flags, u16 qsize, u64 pci_addr, u16 vector) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; + u16 status; + + if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + if (!(flags & NVME_QUEUE_PHYS_CONTIG)) + return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; + + if (flags & NVME_CQ_IRQ_ENABLED) + set_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); + + cq->pci_addr = pci_addr; + cq->qid = cqid; + cq->depth = qsize + 1; + cq->vector = vector; + cq->head = 0; + cq->tail = 0; + cq->phase = 1; + cq->db = NVME_REG_DBS + (((cqid * 2) + 1) * sizeof(u32)); + nvmet_pci_epf_bar_write32(ctrl, cq->db, 0); + + if (!cqid) + cq->qes = sizeof(struct nvme_completion); + else + cq->qes = ctrl->io_cqes; + cq->pci_size = cq->qes * cq->depth; + + cq->iv = nvmet_pci_epf_add_irq_vector(ctrl, vector); + if (!cq->iv) { + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + goto err; + } + + status = nvmet_cq_create(tctrl, &cq->nvme_cq, cqid, cq->depth); + if (status != NVME_SC_SUCCESS) + goto err; + + dev_dbg(ctrl->dev, "CQ[%u]: %u entries of %zu B, IRQ vector %u\n", + cqid, qsize, cq->qes, cq->vector); + + return NVME_SC_SUCCESS; + +err: + clear_bit(NVMET_PCI_EPF_Q_IRQ_ENABLED, &cq->flags); + clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags); + return status; +} + +static u16 nvmet_pci_epf_delete_cq(struct nvmet_ctrl *tctrl, u16 cqid) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *cq = &ctrl->cq[cqid]; + + if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + cancel_delayed_work_sync(&cq->work); + nvmet_pci_epf_drain_queue(cq); + nvmet_pci_epf_remove_irq_vector(ctrl, cq->vector); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_pci_epf_create_sq(struct nvmet_ctrl *tctrl, + u16 sqid, u16 flags, u16 qsize, u64 pci_addr) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; + u16 status; + + if (test_and_set_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + if (!(flags & NVME_QUEUE_PHYS_CONTIG)) + return NVME_SC_INVALID_QUEUE | NVME_STATUS_DNR; + + sq->pci_addr = pci_addr; + sq->qid = sqid; + sq->depth = qsize + 1; + sq->head = 0; + sq->tail = 0; + sq->phase = 0; + sq->db = NVME_REG_DBS + (sqid * 2 * sizeof(u32)); + nvmet_pci_epf_bar_write32(ctrl, sq->db, 0); + if (!sqid) + sq->qes = 1UL << NVME_ADM_SQES; + else + sq->qes = ctrl->io_sqes; + sq->pci_size = sq->qes * sq->depth; + + status = nvmet_sq_create(tctrl, &sq->nvme_sq, sqid, sq->depth); + if (status != NVME_SC_SUCCESS) + goto out_clear_bit; + + sq->iod_wq = alloc_workqueue("sq%d_wq", WQ_UNBOUND, + min_t(int, sq->depth, WQ_MAX_ACTIVE), sqid); + if (!sq->iod_wq) { + dev_err(ctrl->dev, "Failed to create SQ %d work queue\n", sqid); + status = NVME_SC_INTERNAL | NVME_STATUS_DNR; + goto out_destroy_sq; + } + + dev_dbg(ctrl->dev, "SQ[%u]: %u entries of %zu B\n", + sqid, qsize, sq->qes); + + return NVME_SC_SUCCESS; + +out_destroy_sq: + 
nvmet_sq_destroy(&sq->nvme_sq); +out_clear_bit: + clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags); + return status; +} + +static u16 nvmet_pci_epf_delete_sq(struct nvmet_ctrl *tctrl, u16 sqid) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_pci_epf_queue *sq = &ctrl->sq[sqid]; + + if (!test_and_clear_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) + return NVME_SC_QID_INVALID | NVME_STATUS_DNR; + + flush_workqueue(sq->iod_wq); + destroy_workqueue(sq->iod_wq); + sq->iod_wq = NULL; + + nvmet_pci_epf_drain_queue(sq); + + if (sq->nvme_sq.ctrl) + nvmet_sq_destroy(&sq->nvme_sq); + + return NVME_SC_SUCCESS; +} + +static u16 nvmet_pci_epf_get_feat(const struct nvmet_ctrl *tctrl, + u8 feat, void *data) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_feat_arbitration *arb; + struct nvmet_feat_irq_coalesce *irqc; + struct nvmet_feat_irq_config *irqcfg; + struct nvmet_pci_epf_irq_vector *iv; + u16 status; + + switch (feat) { + case NVME_FEAT_ARBITRATION: + arb = data; + if (!ctrl->sq_ab) + arb->ab = 0x7; + else + arb->ab = ilog2(ctrl->sq_ab); + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_COALESCE: + irqc = data; + irqc->thr = ctrl->irq_vector_threshold; + irqc->time = 0; + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_CONFIG: + irqcfg = data; + mutex_lock(&ctrl->irq_lock); + iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); + if (iv) { + irqcfg->cd = iv->cd; + status = NVME_SC_SUCCESS; + } else { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + mutex_unlock(&ctrl->irq_lock); + return status; + + default: + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } +} + +static u16 nvmet_pci_epf_set_feat(const struct nvmet_ctrl *tctrl, + u8 feat, void *data) +{ + struct nvmet_pci_epf_ctrl *ctrl = tctrl->drvdata; + struct nvmet_feat_arbitration *arb; + struct nvmet_feat_irq_coalesce *irqc; + struct nvmet_feat_irq_config *irqcfg; + struct nvmet_pci_epf_irq_vector *iv; + u16 status; + + switch (feat) { + case NVME_FEAT_ARBITRATION: + arb = data; + if (arb->ab == 0x7) + ctrl->sq_ab = 0; + else + ctrl->sq_ab = 1 << arb->ab; + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_COALESCE: + /* + * Since we do not implement precise IRQ coalescing timing, + * ignore the time field. 
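+ * Only the aggregation threshold is used: thr is 0's based, so the
+ * effective threshold is thr + 1 completion entries per interrupt.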
+ */ + irqc = data; + ctrl->irq_vector_threshold = irqc->thr + 1; + return NVME_SC_SUCCESS; + + case NVME_FEAT_IRQ_CONFIG: + irqcfg = data; + mutex_lock(&ctrl->irq_lock); + iv = nvmet_pci_epf_find_irq_vector(ctrl, irqcfg->iv); + if (iv) { + iv->cd = irqcfg->cd; + status = NVME_SC_SUCCESS; + } else { + status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } + mutex_unlock(&ctrl->irq_lock); + return status; + + default: + return NVME_SC_INVALID_FIELD | NVME_STATUS_DNR; + } +} + +static const struct nvmet_fabrics_ops nvmet_pci_epf_fabrics_ops = { + .owner = THIS_MODULE, + .type = NVMF_TRTYPE_PCI, + .add_port = nvmet_pci_epf_add_port, + .remove_port = nvmet_pci_epf_remove_port, + .queue_response = nvmet_pci_epf_queue_response, + .get_mdts = nvmet_pci_epf_get_mdts, + .create_cq = nvmet_pci_epf_create_cq, + .delete_cq = nvmet_pci_epf_delete_cq, + .create_sq = nvmet_pci_epf_create_sq, + .delete_sq = nvmet_pci_epf_delete_sq, + .get_feature = nvmet_pci_epf_get_feat, + .set_feature = nvmet_pci_epf_set_feat, +}; + +static void nvmet_pci_epf_cq_work(struct work_struct *work); + +static void nvmet_pci_epf_init_queue(struct nvmet_pci_epf_ctrl *ctrl, + unsigned int qid, bool sq) +{ + struct nvmet_pci_epf_queue *queue; + + if (sq) { + queue = &ctrl->sq[qid]; + set_bit(NVMET_PCI_EPF_Q_IS_SQ, &queue->flags); + } else { + queue = &ctrl->cq[qid]; + INIT_DELAYED_WORK(&queue->work, nvmet_pci_epf_cq_work); + } + queue->ctrl = ctrl; + queue->qid = qid; + spin_lock_init(&queue->lock); + INIT_LIST_HEAD(&queue->list); +} + +static int nvmet_pci_epf_alloc_queues(struct nvmet_pci_epf_ctrl *ctrl) +{ + unsigned int qid; + + ctrl->sq = kcalloc(ctrl->nr_queues, + sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); + if (!ctrl->sq) + return -ENOMEM; + + ctrl->cq = kcalloc(ctrl->nr_queues, + sizeof(struct nvmet_pci_epf_queue), GFP_KERNEL); + if (!ctrl->cq) { + kfree(ctrl->sq); + ctrl->sq = NULL; + return -ENOMEM; + } + + for (qid = 0; qid < ctrl->nr_queues; qid++) { + nvmet_pci_epf_init_queue(ctrl, qid, true); + nvmet_pci_epf_init_queue(ctrl, qid, false); + } + + return 0; +} + +static void nvmet_pci_epf_free_queues(struct nvmet_pci_epf_ctrl *ctrl) +{ + kfree(ctrl->sq); + ctrl->sq = NULL; + kfree(ctrl->cq); + ctrl->cq = NULL; +} + +static int nvmet_pci_epf_map_queue(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *queue) +{ + struct nvmet_pci_epf *nvme_epf = ctrl->nvme_epf; + int ret; + + ret = nvmet_pci_epf_mem_map(nvme_epf, queue->pci_addr, + queue->pci_size, &queue->pci_map); + if (ret) { + dev_err(ctrl->dev, "Failed to map queue %u (err=%d)\n", + queue->qid, ret); + return ret; + } + + if (queue->pci_map.pci_size < queue->pci_size) { + dev_err(ctrl->dev, "Invalid partial mapping of queue %u\n", + queue->qid); + nvmet_pci_epf_mem_unmap(nvme_epf, &queue->pci_map); + return -ENOMEM; + } + + return 0; +} + +static inline void nvmet_pci_epf_unmap_queue(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *queue) +{ + nvmet_pci_epf_mem_unmap(ctrl->nvme_epf, &queue->pci_map); +} + +static void nvmet_pci_epf_exec_iod_work(struct work_struct *work) +{ + struct nvmet_pci_epf_iod *iod = + container_of(work, struct nvmet_pci_epf_iod, work); + struct nvmet_req *req = &iod->req; + int ret; + + if (!iod->ctrl->link_up) { + nvmet_pci_epf_free_iod(iod); + return; + } + + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &iod->sq->flags)) { + iod->status = NVME_SC_QID_INVALID | NVME_STATUS_DNR; + goto complete; + } + + if (!nvmet_req_init(req, &iod->cq->nvme_cq, &iod->sq->nvme_sq, + &nvmet_pci_epf_fabrics_ops)) + goto complete; + + 
iod->data_len = nvmet_req_transfer_len(req); + if (iod->data_len) { + /* + * Get the data DMA transfer direction. Here "device" means the + * PCI root-complex host. + */ + if (nvme_is_write(&iod->cmd)) + iod->dma_dir = DMA_FROM_DEVICE; + else + iod->dma_dir = DMA_TO_DEVICE; + + /* + * Setup the command data buffer and get the command data from + * the host if needed. + */ + ret = nvmet_pci_epf_alloc_iod_data_buf(iod); + if (!ret && iod->dma_dir == DMA_FROM_DEVICE) + ret = nvmet_pci_epf_transfer_iod_data(iod); + if (ret) { + nvmet_req_uninit(req); + goto complete; + } + } + + req->execute(req); + + /* + * If we do not have data to transfer after the command execution + * finishes, nvmet_pci_epf_queue_response() will complete the command + * directly. No need to wait for the completion in this case. + */ + if (!iod->data_len || iod->dma_dir != DMA_TO_DEVICE) + return; + + wait_for_completion(&iod->done); + + if (iod->status == NVME_SC_SUCCESS) { + WARN_ON_ONCE(!iod->data_len || iod->dma_dir != DMA_TO_DEVICE); + nvmet_pci_epf_transfer_iod_data(iod); + } + +complete: + nvmet_pci_epf_complete_iod(iod); +} + +static int nvmet_pci_epf_process_sq(struct nvmet_pci_epf_ctrl *ctrl, + struct nvmet_pci_epf_queue *sq) +{ + struct nvmet_pci_epf_iod *iod; + int ret, n = 0; + + sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); + while (sq->head != sq->tail && (!ctrl->sq_ab || n < ctrl->sq_ab)) { + iod = nvmet_pci_epf_alloc_iod(sq); + if (!iod) + break; + + /* Get the NVMe command submitted by the host. */ + ret = nvmet_pci_epf_transfer(ctrl, &iod->cmd, + sq->pci_addr + sq->head * sq->qes, + sq->qes, DMA_FROM_DEVICE); + if (ret) { + /* Not much we can do... */ + nvmet_pci_epf_free_iod(iod); + break; + } + + dev_dbg(ctrl->dev, "SQ[%u]: head %u, tail %u, command %s\n", + sq->qid, sq->head, sq->tail, + nvmet_pci_epf_iod_name(iod)); + + sq->head++; + if (sq->head == sq->depth) + sq->head = 0; + n++; + + queue_work_on(WORK_CPU_UNBOUND, sq->iod_wq, &iod->work); + + sq->tail = nvmet_pci_epf_bar_read32(ctrl, sq->db); + } + + return n; +} + +static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work) +{ + struct nvmet_pci_epf_ctrl *ctrl = + container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work); + struct nvmet_pci_epf_queue *sq; + unsigned long last = 0; + int i, nr_sqs; + + while (ctrl->link_up && ctrl->enabled) { + nr_sqs = 0; + /* Do round-robin arbitration. */ + for (i = 0; i < ctrl->nr_queues; i++) { + sq = &ctrl->sq[i]; + if (!test_bit(NVMET_PCI_EPF_Q_LIVE, &sq->flags)) + continue; + if (nvmet_pci_epf_process_sq(ctrl, sq)) + nr_sqs++; + } + + if (nr_sqs) { + last = jiffies; + continue; + } + + /* + * If we have not received any command on any queue for more + * than NVMET_PCI_EPF_SQ_POLL_IDLE, assume we are idle and + * reschedule. This avoids "burning" a CPU when the controller + * is idle for a long time. + */ + if (time_is_before_jiffies(last + NVMET_PCI_EPF_SQ_POLL_IDLE)) + break; + + cpu_relax(); + } + + schedule_delayed_work(&ctrl->poll_sqs, NVMET_PCI_EPF_SQ_POLL_INTERVAL); +} + +static void nvmet_pci_epf_cq_work(struct work_struct *work) +{ + struct nvmet_pci_epf_queue *cq = + container_of(work, struct nvmet_pci_epf_queue, work.work); + struct nvmet_pci_epf_ctrl *ctrl = cq->ctrl; + struct nvme_completion *cqe; + struct nvmet_pci_epf_iod *iod; + unsigned long flags; + int ret, n = 0; + + ret = nvmet_pci_epf_map_queue(ctrl, cq); + if (ret) + goto again; + + while (test_bit(NVMET_PCI_EPF_Q_LIVE, &cq->flags) && ctrl->link_up) { + + /* Check that the CQ is not full. 
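The head is read back from the CQ doorbell; if posting one more entry would catch up with it, stop and retry the work after NVMET_PCI_EPF_CQ_RETRY_INTERVAL.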
*/ + cq->head = nvmet_pci_epf_bar_read32(ctrl, cq->db); + if (cq->head == cq->tail + 1) { + ret = -EAGAIN; + break; + } + + spin_lock_irqsave(&cq->lock, flags); + iod = list_first_entry_or_null(&cq->list, + struct nvmet_pci_epf_iod, link); + if (iod) + list_del_init(&iod->link); + spin_unlock_irqrestore(&cq->lock, flags); + + if (!iod) + break; + + /* Post the IOD completion entry. */ + cqe = &iod->cqe; + cqe->status = cpu_to_le16((iod->status << 1) | cq->phase); + + dev_dbg(ctrl->dev, + "CQ[%u]: %s status 0x%x, result 0x%llx, head %u, tail %u, phase %u\n", + cq->qid, nvmet_pci_epf_iod_name(iod), iod->status, + le64_to_cpu(cqe->result.u64), cq->head, cq->tail, + cq->phase); + + memcpy_toio(cq->pci_map.virt_addr + cq->tail * cq->qes, + cqe, cq->qes); + + cq->tail++; + if (cq->tail >= cq->depth) { + cq->tail = 0; + cq->phase ^= 1; + } + + nvmet_pci_epf_free_iod(iod); + + /* Signal the host. */ + nvmet_pci_epf_raise_irq(ctrl, cq, false); + n++; + } + + nvmet_pci_epf_unmap_queue(ctrl, cq); + + /* + * We do not support precise IRQ coalescing time (100ns units as per + * NVMe specifications). So if we have posted completion entries without + * reaching the interrupt coalescing threshold, raise an interrupt. + */ + if (n) + nvmet_pci_epf_raise_irq(ctrl, cq, true); + +again: + if (ret < 0) + queue_delayed_work(system_highpri_wq, &cq->work, + NVMET_PCI_EPF_CQ_RETRY_INTERVAL); +} + +static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + u64 pci_addr, asq, acq; + u32 aqa; + u16 status, qsize; + + if (ctrl->enabled) + return 0; + + dev_info(ctrl->dev, "Enabling controller\n"); + + ctrl->mps_shift = nvmet_cc_mps(ctrl->cc) + 12; + ctrl->mps = 1UL << ctrl->mps_shift; + ctrl->mps_mask = ctrl->mps - 1; + + ctrl->io_sqes = 1UL << nvmet_cc_iosqes(ctrl->cc); + if (ctrl->io_sqes < sizeof(struct nvme_command)) { + dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n", + ctrl->io_sqes, sizeof(struct nvme_command)); + return -EINVAL; + } + + ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc); + if (ctrl->io_cqes < sizeof(struct nvme_completion)) { + dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n", + ctrl->io_sqes, sizeof(struct nvme_completion)); + return -EINVAL; + } + + /* Create the admin queue. */ + aqa = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_AQA); + asq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ASQ); + acq = nvmet_pci_epf_bar_read64(ctrl, NVME_REG_ACQ); + + qsize = (aqa & 0x0fff0000) >> 16; + pci_addr = acq & GENMASK_ULL(63, 12); + status = nvmet_pci_epf_create_cq(ctrl->tctrl, 0, + NVME_CQ_IRQ_ENABLED | NVME_QUEUE_PHYS_CONTIG, + qsize, pci_addr, 0); + if (status != NVME_SC_SUCCESS) { + dev_err(ctrl->dev, "Failed to create admin completion queue\n"); + return -EINVAL; + } + + qsize = aqa & 0x00000fff; + pci_addr = asq & GENMASK_ULL(63, 12); + status = nvmet_pci_epf_create_sq(ctrl->tctrl, 0, NVME_QUEUE_PHYS_CONTIG, + qsize, pci_addr); + if (status != NVME_SC_SUCCESS) { + dev_err(ctrl->dev, "Failed to create admin submission queue\n"); + nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); + return -EINVAL; + } + + ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB; + ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD; + ctrl->enabled = true; + + /* Start polling the controller SQs. 
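The polling work busy-loops while commands keep arriving, gives up after NVMET_PCI_EPF_SQ_POLL_IDLE without any command, and then re-arms itself every NVMET_PCI_EPF_SQ_POLL_INTERVAL.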
*/ + schedule_delayed_work(&ctrl->poll_sqs, 0); + + return 0; +} + +static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + int qid; + + if (!ctrl->enabled) + return; + + dev_info(ctrl->dev, "Disabling controller\n"); + + ctrl->enabled = false; + cancel_delayed_work_sync(&ctrl->poll_sqs); + + /* Delete all I/O queues first. */ + for (qid = 1; qid < ctrl->nr_queues; qid++) + nvmet_pci_epf_delete_sq(ctrl->tctrl, qid); + + for (qid = 1; qid < ctrl->nr_queues; qid++) + nvmet_pci_epf_delete_cq(ctrl->tctrl, qid); + + /* Delete the admin queue last. */ + nvmet_pci_epf_delete_sq(ctrl->tctrl, 0); + nvmet_pci_epf_delete_cq(ctrl->tctrl, 0); +} + +static void nvmet_pci_epf_poll_cc_work(struct work_struct *work) +{ + struct nvmet_pci_epf_ctrl *ctrl = + container_of(work, struct nvmet_pci_epf_ctrl, poll_cc.work); + u32 old_cc, new_cc; + int ret; + + if (!ctrl->tctrl) + return; + + old_cc = ctrl->cc; + new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC); + ctrl->cc = new_cc; + + if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) { + ret = nvmet_pci_epf_enable_ctrl(ctrl); + if (ret) + return; + ctrl->csts |= NVME_CSTS_RDY; + } + + if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) { + nvmet_pci_epf_disable_ctrl(ctrl); + ctrl->csts &= ~NVME_CSTS_RDY; + } + + if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) { + nvmet_pci_epf_disable_ctrl(ctrl); + ctrl->csts |= NVME_CSTS_SHST_CMPLT; + } + + if (!nvmet_cc_shn(new_cc) && nvmet_cc_shn(old_cc)) + ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; + + nvmet_update_cc(ctrl->tctrl, ctrl->cc); + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); + + schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); +} + +static void nvmet_pci_epf_init_bar(struct nvmet_pci_epf_ctrl *ctrl) +{ + struct nvmet_ctrl *tctrl = ctrl->tctrl; + + ctrl->bar = ctrl->nvme_epf->reg_bar; + + /* Copy the target controller capabilities as a base. */ + ctrl->cap = tctrl->cap; + + /* Contiguous Queues Required (CQR). */ + ctrl->cap |= 0x1ULL << 16; + + /* Set Doorbell stride to 4B (DSTRB). */ + ctrl->cap &= ~GENMASK_ULL(35, 32); + + /* Clear NVM Subsystem Reset Supported (NSSRS). */ + ctrl->cap &= ~(0x1ULL << 36); + + /* Clear Boot Partition Support (BPS). */ + ctrl->cap &= ~(0x1ULL << 45); + + /* Clear Persistent Memory Region Supported (PMRS). */ + ctrl->cap &= ~(0x1ULL << 56); + + /* Clear Controller Memory Buffer Supported (CMBS). */ + ctrl->cap &= ~(0x1ULL << 57); + + /* Controller configuration. */ + ctrl->cc = tctrl->cc & (~NVME_CC_ENABLE); + + /* Controller status. 
*/ + ctrl->csts = ctrl->tctrl->csts; + + nvmet_pci_epf_bar_write64(ctrl, NVME_REG_CAP, ctrl->cap); + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_VS, tctrl->subsys->ver); + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts); + nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CC, ctrl->cc); +} + +static int nvmet_pci_epf_create_ctrl(struct nvmet_pci_epf *nvme_epf, + unsigned int max_nr_queues) +{ + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + struct nvmet_alloc_ctrl_args args = {}; + char hostnqn[NVMF_NQN_SIZE]; + uuid_t id; + int ret; + + memset(ctrl, 0, sizeof(*ctrl)); + ctrl->dev = &nvme_epf->epf->dev; + mutex_init(&ctrl->irq_lock); + ctrl->nvme_epf = nvme_epf; + ctrl->mdts = nvme_epf->mdts_kb * SZ_1K; + INIT_DELAYED_WORK(&ctrl->poll_cc, nvmet_pci_epf_poll_cc_work); + INIT_DELAYED_WORK(&ctrl->poll_sqs, nvmet_pci_epf_poll_sqs_work); + + ret = mempool_init_kmalloc_pool(&ctrl->iod_pool, + max_nr_queues * NVMET_MAX_QUEUE_SIZE, + sizeof(struct nvmet_pci_epf_iod)); + if (ret) { + dev_err(ctrl->dev, "Failed to initialize IOD mempool\n"); + return ret; + } + + ctrl->port = nvmet_pci_epf_find_port(ctrl, nvme_epf->portid); + if (!ctrl->port) { + dev_err(ctrl->dev, "Port not found\n"); + ret = -EINVAL; + goto out_mempool_exit; + } + + /* Create the target controller. */ + uuid_gen(&id); + snprintf(hostnqn, NVMF_NQN_SIZE, + "nqn.2014-08.org.nvmexpress:uuid:%pUb", &id); + args.port = ctrl->port; + args.subsysnqn = nvme_epf->subsysnqn; + memset(&id, 0, sizeof(uuid_t)); + args.hostid = &id; + args.hostnqn = hostnqn; + args.ops = &nvmet_pci_epf_fabrics_ops; + + ctrl->tctrl = nvmet_alloc_ctrl(&args); + if (!ctrl->tctrl) { + dev_err(ctrl->dev, "Failed to create target controller\n"); + ret = -ENOMEM; + goto out_mempool_exit; + } + ctrl->tctrl->drvdata = ctrl; + + /* We do not support protection information for now. */ + if (ctrl->tctrl->pi_support) { + dev_err(ctrl->dev, + "Protection information (PI) is not supported\n"); + ret = -ENOTSUPP; + goto out_put_ctrl; + } + + /* Allocate our queues, up to the maximum number. */ + ctrl->nr_queues = min(ctrl->tctrl->subsys->max_qid + 1, max_nr_queues); + ret = nvmet_pci_epf_alloc_queues(ctrl); + if (ret) + goto out_put_ctrl; + + /* + * Allocate the IRQ vectors descriptors. We cannot have more than the + * maximum number of queues. + */ + ret = nvmet_pci_epf_alloc_irq_vectors(ctrl); + if (ret) + goto out_free_queues; + + dev_info(ctrl->dev, + "New PCI ctrl \"%s\", %u I/O queues, mdts %u B\n", + ctrl->tctrl->subsys->subsysnqn, ctrl->nr_queues - 1, + ctrl->mdts); + + /* Initialize BAR 0 using the target controller CAP. 
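The CAP value copied from the target controller is adjusted to require contiguous queues, use a 4 B doorbell stride, and advertise no NSSR, boot partition, PMR or CMB support.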
*/ + nvmet_pci_epf_init_bar(ctrl); + + return 0; + +out_free_queues: + nvmet_pci_epf_free_queues(ctrl); +out_put_ctrl: + nvmet_ctrl_put(ctrl->tctrl); + ctrl->tctrl = NULL; +out_mempool_exit: + mempool_exit(&ctrl->iod_pool); + return ret; +} + +static void nvmet_pci_epf_start_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL); +} + +static void nvmet_pci_epf_stop_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + cancel_delayed_work_sync(&ctrl->poll_cc); + + nvmet_pci_epf_disable_ctrl(ctrl); +} + +static void nvmet_pci_epf_destroy_ctrl(struct nvmet_pci_epf_ctrl *ctrl) +{ + if (!ctrl->tctrl) + return; + + dev_info(ctrl->dev, "Destroying PCI ctrl \"%s\"\n", + ctrl->tctrl->subsys->subsysnqn); + + nvmet_pci_epf_stop_ctrl(ctrl); + + nvmet_pci_epf_free_queues(ctrl); + nvmet_pci_epf_free_irq_vectors(ctrl); + + nvmet_ctrl_put(ctrl->tctrl); + ctrl->tctrl = NULL; + + mempool_exit(&ctrl->iod_pool); +} + +static int nvmet_pci_epf_configure_bar(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + const struct pci_epc_features *epc_features = nvme_epf->epc_features; + size_t reg_size, reg_bar_size; + size_t msix_table_size = 0; + + /* + * The first free BAR will be our register BAR and per NVMe + * specifications, it must be BAR 0. + */ + if (pci_epc_get_first_free_bar(epc_features) != BAR_0) { + dev_err(&epf->dev, "BAR 0 is not free\n"); + return -ENODEV; + } + + if (epc_features->bar[BAR_0].only_64bit) + epf->bar[BAR_0].flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; + + /* + * Calculate the size of the register bar: NVMe registers first with + * enough space for the doorbells, followed by the MSI-X table + * if supported. + */ + reg_size = NVME_REG_DBS + (NVMET_NR_QUEUES * 2 * sizeof(u32)); + reg_size = ALIGN(reg_size, 8); + + if (epc_features->msix_capable) { + size_t pba_size; + + msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; + nvme_epf->msix_table_offset = reg_size; + pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); + + reg_size += msix_table_size + pba_size; + } + + if (epc_features->bar[BAR_0].type == BAR_FIXED) { + if (reg_size > epc_features->bar[BAR_0].fixed_size) { + dev_err(&epf->dev, + "BAR 0 size %llu B too small, need %zu B\n", + epc_features->bar[BAR_0].fixed_size, + reg_size); + return -ENOMEM; + } + reg_bar_size = epc_features->bar[BAR_0].fixed_size; + } else { + reg_bar_size = ALIGN(reg_size, max(epc_features->align, 4096)); + } + + nvme_epf->reg_bar = pci_epf_alloc_space(epf, reg_bar_size, BAR_0, + epc_features, PRIMARY_INTERFACE); + if (!nvme_epf->reg_bar) { + dev_err(&epf->dev, "Failed to allocate BAR 0\n"); + return -ENOMEM; + } + memset(nvme_epf->reg_bar, 0, reg_bar_size); + + return 0; +} + +static void nvmet_pci_epf_free_bar(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + + if (!nvme_epf->reg_bar) + return; + + pci_epf_free_space(epf, nvme_epf->reg_bar, BAR_0, PRIMARY_INTERFACE); + nvme_epf->reg_bar = NULL; +} + +static void nvmet_pci_epf_clear_bar(struct nvmet_pci_epf *nvme_epf) +{ + struct pci_epf *epf = nvme_epf->epf; + + pci_epc_clear_bar(epf->epc, epf->func_no, epf->vfunc_no, + &epf->bar[BAR_0]); +} + +static int nvmet_pci_epf_init_irq(struct nvmet_pci_epf *nvme_epf) +{ + const struct pci_epc_features *epc_features = nvme_epf->epc_features; + struct pci_epf *epf = nvme_epf->epf; + int ret; + + /* Enable MSI-X if supported, otherwise, use MSI. 
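If neither is available, fall back to a single INTX vector.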
*/ + if (epc_features->msix_capable && epf->msix_interrupts) { + ret = pci_epc_set_msix(epf->epc, epf->func_no, epf->vfunc_no, + epf->msix_interrupts, BAR_0, + nvme_epf->msix_table_offset); + if (ret) { + dev_err(&epf->dev, "Failed to configure MSI-X\n"); + return ret; + } + + nvme_epf->nr_vectors = epf->msix_interrupts; + nvme_epf->irq_type = PCI_IRQ_MSIX; + + return 0; + } + + if (epc_features->msi_capable && epf->msi_interrupts) { + ret = pci_epc_set_msi(epf->epc, epf->func_no, epf->vfunc_no, + epf->msi_interrupts); + if (ret) { + dev_err(&epf->dev, "Failed to configure MSI\n"); + return ret; + } + + nvme_epf->nr_vectors = epf->msi_interrupts; + nvme_epf->irq_type = PCI_IRQ_MSI; + + return 0; + } + + /* MSI and MSI-X are not supported: fall back to INTx. */ + nvme_epf->nr_vectors = 1; + nvme_epf->irq_type = PCI_IRQ_INTX; + + return 0; +} + +static int nvmet_pci_epf_epc_init(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features = nvme_epf->epc_features; + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + unsigned int max_nr_queues = NVMET_NR_QUEUES; + int ret; + + /* For now, do not support virtual functions. */ + if (epf->vfunc_no > 0) { + dev_err(&epf->dev, "Virtual functions are not supported\n"); + return -EINVAL; + } + + /* + * Cap the maximum number of queues we can support on the controller + * with the number of IRQs we can use. + */ + if (epc_features->msix_capable && epf->msix_interrupts) { + dev_info(&epf->dev, + "PCI endpoint controller supports MSI-X, %u vectors\n", + epf->msix_interrupts); + max_nr_queues = min(max_nr_queues, epf->msix_interrupts); + } else if (epc_features->msi_capable && epf->msi_interrupts) { + dev_info(&epf->dev, + "PCI endpoint controller supports MSI, %u vectors\n", + epf->msi_interrupts); + max_nr_queues = min(max_nr_queues, epf->msi_interrupts); + } + + if (max_nr_queues < 2) { + dev_err(&epf->dev, "Invalid maximum number of queues %u\n", + max_nr_queues); + return -EINVAL; + } + + /* Create the target controller. */ + ret = nvmet_pci_epf_create_ctrl(nvme_epf, max_nr_queues); + if (ret) { + dev_err(&epf->dev, + "Failed to create NVMe PCI target controller (err=%d)\n", + ret); + return ret; + } + + /* Set device ID, class, etc. */ + epf->header->vendorid = ctrl->tctrl->subsys->vendor_id; + epf->header->subsys_vendor_id = ctrl->tctrl->subsys->subsys_vendor_id; + ret = pci_epc_write_header(epf->epc, epf->func_no, epf->vfunc_no, + epf->header); + if (ret) { + dev_err(&epf->dev, + "Failed to write configuration header (err=%d)\n", ret); + goto out_destroy_ctrl; + } + + ret = pci_epc_set_bar(epf->epc, epf->func_no, epf->vfunc_no, + &epf->bar[BAR_0]); + if (ret) { + dev_err(&epf->dev, "Failed to set BAR 0 (err=%d)\n", ret); + goto out_destroy_ctrl; + } + + /* + * Enable interrupts and start polling the controller BAR if we do not + * have a link up notifier. 
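+ * If the endpoint controller does provide a link up notifier, polling is
+ * started from the link_up() callback instead.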
+ */ + ret = nvmet_pci_epf_init_irq(nvme_epf); + if (ret) + goto out_clear_bar; + + if (!epc_features->linkup_notifier) { + ctrl->link_up = true; + nvmet_pci_epf_start_ctrl(&nvme_epf->ctrl); + } + + return 0; + +out_clear_bar: + nvmet_pci_epf_clear_bar(nvme_epf); +out_destroy_ctrl: + nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); + return ret; +} + +static void nvmet_pci_epf_epc_deinit(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + + ctrl->link_up = false; + nvmet_pci_epf_destroy_ctrl(ctrl); + + nvmet_pci_epf_deinit_dma(nvme_epf); + nvmet_pci_epf_clear_bar(nvme_epf); +} + +static int nvmet_pci_epf_link_up(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + + ctrl->link_up = true; + nvmet_pci_epf_start_ctrl(ctrl); + + return 0; +} + +static int nvmet_pci_epf_link_down(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct nvmet_pci_epf_ctrl *ctrl = &nvme_epf->ctrl; + + ctrl->link_up = false; + nvmet_pci_epf_stop_ctrl(ctrl); + + return 0; +} + +static const struct pci_epc_event_ops nvmet_pci_epf_event_ops = { + .epc_init = nvmet_pci_epf_epc_init, + .epc_deinit = nvmet_pci_epf_epc_deinit, + .link_up = nvmet_pci_epf_link_up, + .link_down = nvmet_pci_epf_link_down, +}; + +static int nvmet_pci_epf_bind(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + const struct pci_epc_features *epc_features; + struct pci_epc *epc = epf->epc; + int ret; + + if (WARN_ON_ONCE(!epc)) + return -EINVAL; + + epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); + if (!epc_features) { + dev_err(&epf->dev, "epc_features not implemented\n"); + return -EOPNOTSUPP; + } + nvme_epf->epc_features = epc_features; + + ret = nvmet_pci_epf_configure_bar(nvme_epf); + if (ret) + return ret; + + nvmet_pci_epf_init_dma(nvme_epf); + + return 0; +} + +static void nvmet_pci_epf_unbind(struct pci_epf *epf) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + struct pci_epc *epc = epf->epc; + + nvmet_pci_epf_destroy_ctrl(&nvme_epf->ctrl); + + if (epc->init_complete) { + nvmet_pci_epf_deinit_dma(nvme_epf); + nvmet_pci_epf_clear_bar(nvme_epf); + } + + nvmet_pci_epf_free_bar(nvme_epf); +} + +static struct pci_epf_header nvme_epf_pci_header = { + .vendorid = PCI_ANY_ID, + .deviceid = PCI_ANY_ID, + .progif_code = 0x02, /* NVM Express */ + .baseclass_code = PCI_BASE_CLASS_STORAGE, + .subclass_code = 0x08, /* Non-Volatile Memory controller */ + .interrupt_pin = PCI_INTERRUPT_INTA, +}; + +static int nvmet_pci_epf_probe(struct pci_epf *epf, + const struct pci_epf_device_id *id) +{ + struct nvmet_pci_epf *nvme_epf; + int ret; + + nvme_epf = devm_kzalloc(&epf->dev, sizeof(*nvme_epf), GFP_KERNEL); + if (!nvme_epf) + return -ENOMEM; + + ret = devm_mutex_init(&epf->dev, &nvme_epf->mmio_lock); + if (ret) + return ret; + + nvme_epf->epf = epf; + nvme_epf->mdts_kb = NVMET_PCI_EPF_MDTS_KB; + + epf->event_ops = &nvmet_pci_epf_event_ops; + epf->header = &nvme_epf_pci_header; + epf_set_drvdata(epf, nvme_epf); + + return 0; +} + +#define to_nvme_epf(epf_group) \ + container_of(epf_group, struct nvmet_pci_epf, group) + +static ssize_t nvmet_pci_epf_portid_show(struct config_item *item, char *page) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + return sysfs_emit(page, "%u\n", le16_to_cpu(nvme_epf->portid)); +} + +static ssize_t 
nvmet_pci_epf_portid_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + u16 portid; + + /* Do not allow setting this when the function is already started. */ + if (nvme_epf->ctrl.tctrl) + return -EBUSY; + + if (!len) + return -EINVAL; + + if (kstrtou16(page, 0, &portid)) + return -EINVAL; + + nvme_epf->portid = cpu_to_le16(portid); + + return len; +} + +CONFIGFS_ATTR(nvmet_pci_epf_, portid); + +static ssize_t nvmet_pci_epf_subsysnqn_show(struct config_item *item, + char *page) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + return sysfs_emit(page, "%s\n", nvme_epf->subsysnqn); +} + +static ssize_t nvmet_pci_epf_subsysnqn_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + /* Do not allow setting this when the function is already started. */ + if (nvme_epf->ctrl.tctrl) + return -EBUSY; + + if (!len) + return -EINVAL; + + strscpy(nvme_epf->subsysnqn, page, len); + + return len; +} + +CONFIGFS_ATTR(nvmet_pci_epf_, subsysnqn); + +static ssize_t nvmet_pci_epf_mdts_kb_show(struct config_item *item, char *page) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + + return sysfs_emit(page, "%u\n", nvme_epf->mdts_kb); +} + +static ssize_t nvmet_pci_epf_mdts_kb_store(struct config_item *item, + const char *page, size_t len) +{ + struct config_group *group = to_config_group(item); + struct nvmet_pci_epf *nvme_epf = to_nvme_epf(group); + unsigned long mdts_kb; + int ret; + + if (nvme_epf->ctrl.tctrl) + return -EBUSY; + + ret = kstrtoul(page, 0, &mdts_kb); + if (ret) + return ret; + if (!mdts_kb) + mdts_kb = NVMET_PCI_EPF_MDTS_KB; + else if (mdts_kb > NVMET_PCI_EPF_MAX_MDTS_KB) + mdts_kb = NVMET_PCI_EPF_MAX_MDTS_KB; + + if (!is_power_of_2(mdts_kb)) + return -EINVAL; + + nvme_epf->mdts_kb = mdts_kb; + + return len; +} + +CONFIGFS_ATTR(nvmet_pci_epf_, mdts_kb); + +static struct configfs_attribute *nvmet_pci_epf_attrs[] = { + &nvmet_pci_epf_attr_portid, + &nvmet_pci_epf_attr_subsysnqn, + &nvmet_pci_epf_attr_mdts_kb, + NULL, +}; + +static const struct config_item_type nvmet_pci_epf_group_type = { + .ct_attrs = nvmet_pci_epf_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *nvmet_pci_epf_add_cfs(struct pci_epf *epf, + struct config_group *group) +{ + struct nvmet_pci_epf *nvme_epf = epf_get_drvdata(epf); + + config_group_init_type_name(&nvme_epf->group, "nvme", + &nvmet_pci_epf_group_type); + + return &nvme_epf->group; +} + +static const struct pci_epf_device_id nvmet_pci_epf_ids[] = { + { .name = "nvmet_pci_epf" }, + {}, +}; + +static struct pci_epf_ops nvmet_pci_epf_ops = { + .bind = nvmet_pci_epf_bind, + .unbind = nvmet_pci_epf_unbind, + .add_cfs = nvmet_pci_epf_add_cfs, +}; + +static struct pci_epf_driver nvmet_pci_epf_driver = { + .driver.name = "nvmet_pci_epf", + .probe = nvmet_pci_epf_probe, + .id_table = nvmet_pci_epf_ids, + .ops = &nvmet_pci_epf_ops, + .owner = THIS_MODULE, +}; + +static int __init nvmet_pci_epf_init_module(void) +{ + int ret; + + ret = pci_epf_register_driver(&nvmet_pci_epf_driver); + if (ret) + return ret; + + ret = nvmet_register_transport(&nvmet_pci_epf_fabrics_ops); + if (ret) { + pci_epf_unregister_driver(&nvmet_pci_epf_driver); + return ret; + } + + return 0; +} + +static void 
__exit nvmet_pci_epf_cleanup_module(void)
+{
+	nvmet_unregister_transport(&nvmet_pci_epf_fabrics_ops);
+	pci_epf_unregister_driver(&nvmet_pci_epf_driver);
+}
+
+module_init(nvmet_pci_epf_init_module);
+module_exit(nvmet_pci_epf_cleanup_module);
+
+MODULE_DESCRIPTION("NVMe PCI Endpoint Function target driver");
+MODULE_AUTHOR("Damien Le Moal <dlemoal@kernel.org>");
+MODULE_LICENSE("GPL");
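
Editor's note: for readers less familiar with the PCI endpoint framework that the new target driver plugs into, the sketch below (not part of the commit) distills the registration pattern used at the end of pci-epf.c above. The my_epf_* names are hypothetical placeholders; the pci_epf_* / pci_epc_* structures and calls are the existing endpoint framework API that the driver itself uses. This is a minimal illustration only, not the driver's implementation; the real driver additionally registers its nvmet fabrics ops with nvmet_register_transport() in its init routine, as shown in the diff.

/*
 * Minimal, illustrative PCI endpoint function driver skeleton.
 * All "my_epf_*" names are placeholders; the pci_epf_* API is real.
 */
#include <linux/module.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int my_epf_probe(struct pci_epf *epf,
			const struct pci_epf_device_id *id)
{
	/* Allocate per-function state and attach it with epf_set_drvdata(). */
	return 0;
}

static int my_epf_bind(struct pci_epf *epf)
{
	/* Query the endpoint controller features before setting up BARs/DMA. */
	if (!pci_epc_get_features(epf->epc, epf->func_no, epf->vfunc_no))
		return -EOPNOTSUPP;
	return 0;
}

static void my_epf_unbind(struct pci_epf *epf)
{
	/* Undo whatever bind() set up (BARs, DMA channels, ...). */
}

static const struct pci_epf_device_id my_epf_ids[] = {
	{ .name = "my_epf" },
	{},
};

static struct pci_epf_ops my_epf_ops = {
	.bind	= my_epf_bind,
	.unbind	= my_epf_unbind,
};

static struct pci_epf_driver my_epf_driver = {
	.driver.name	= "my_epf",
	.probe		= my_epf_probe,
	.id_table	= my_epf_ids,
	.ops		= &my_epf_ops,
	.owner		= THIS_MODULE,
};

static int __init my_epf_init(void)
{
	return pci_epf_register_driver(&my_epf_driver);
}
module_init(my_epf_init);

static void __exit my_epf_exit(void)
{
	pci_epf_unregister_driver(&my_epf_driver);
}
module_exit(my_epf_exit);

MODULE_DESCRIPTION("Illustrative PCI endpoint function driver skeleton");
MODULE_LICENSE("GPL");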