Diffstat (limited to 'drivers/ntb/ntb_transport.c')
-rw-r--r--	drivers/ntb/ntb_transport.c	2039
1 file changed, 1537 insertions(+), 502 deletions(-)
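The diff below moves ntb_transport.c off the old ntb_hw-specific interface (ntb_register_transport(), ntb_ring_sdb(), spad read/write helpers) and onto the generic ntb.h client API, renaming the client-facing hooks to ntb_transport_register_client() / ntb_transport_register_client_dev() along the way. For orientation, here is a minimal sketch of how a client attaches under the renamed interface; the my_* names are illustrative placeholders, while the ntb_transport_* calls and the struct ntb_transport_client layout come from the patch itself.

/*
 * Minimal sketch of an ntb_transport client against the renamed API.
 * my_probe()/my_remove() and "my_client" are hypothetical; only the
 * ntb_transport_register_* calls are introduced by the patch below.
 */
#include <linux/module.h>
#include <linux/ntb_transport.h>

static int my_probe(struct device *client_dev)
{
	/* allocate queue pairs, netdevs, etc. against client_dev here */
	return 0;
}

static void my_remove(struct device *client_dev)
{
	/* undo whatever my_probe() set up */
}

static struct ntb_transport_client my_client = {
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.probe		= my_probe,
	.remove		= my_remove,
};

static int __init my_init(void)
{
	int rc;

	/* create a "<name>N" device on the ntb_transport bus... */
	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;

	/* ...and bind this driver to it via the bus match-by-name */
	rc = ntb_transport_register_client(&my_client);
	if (rc)
		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
	return rc;
}
module_init(my_init);

static void __exit my_exit(void)
{
	ntb_transport_unregister_client(&my_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(my_exit);

MODULE_LICENSE("Dual BSD/GPL");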
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index f8d7081ee301..eb875e3db2e3 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -5,6 +5,7 @@
  * GPL LICENSE SUMMARY
  *
  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or modify
  *   it under the terms of version 2 of the GNU General Public License as
@@ -13,6 +14,7 @@
  *   BSD LICENSE
  *
  *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
@@ -40,13 +42,14 @@
  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
- * Intel PCIe NTB Linux driver
+ * PCIe NTB Transport Linux driver
  *
  * Contact Information:
  * Jon Mason <jon.mason@intel.com>
  */
 #include <linux/debugfs.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
 #include <linux/export.h>
@@ -55,27 +58,71 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/ntb.h>
-#include "ntb_hw.h"
-
-#define NTB_TRANSPORT_VERSION	3
-
-static unsigned int transport_mtu = 0x401E;
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include "linux/ntb.h"
+#include "linux/ntb_transport.h"
+
+#define NTB_TRANSPORT_VERSION	4
+#define NTB_TRANSPORT_VER	"4"
+#define NTB_TRANSPORT_NAME	"ntb_transport"
+#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"
+#define NTB_TRANSPORT_MIN_SPADS (MW0_SZ_HIGH + 2)
+
+MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
+MODULE_VERSION(NTB_TRANSPORT_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+static unsigned long max_mw_size;
+module_param(max_mw_size, ulong, 0644);
+MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
+
+static unsigned int transport_mtu = 0x10000;
 module_param(transport_mtu, uint, 0644);
 MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
 
-static unsigned char max_num_clients = 2;
+static unsigned char max_num_clients;
 module_param(max_num_clients, byte, 0644);
 MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
 
+static unsigned int copy_bytes = 1024;
+module_param(copy_bytes, uint, 0644);
+MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");
+
+static bool use_dma;
+module_param(use_dma, bool, 0644);
+MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");
+
+static bool use_msi;
+#ifdef CONFIG_NTB_MSI
+module_param(use_msi, bool, 0644);
+MODULE_PARM_DESC(use_msi, "Use MSI interrupts instead of doorbells");
+#endif
+
+static struct dentry *nt_debugfs_dir;
+
+/* Only two-port NTB devices are supported */
+#define PIDX		NTB_DEF_PEER_IDX
+
 struct ntb_queue_entry {
 	/* ntb_queue list reference */
 	struct list_head entry;
-	/* pointers to data to be transfered */
+	/* pointers to data to be transferred */
 	void *cb_data;
 	void *buf;
 	unsigned int len;
 	unsigned int flags;
+	int retries;
+	int errors;
+	unsigned int tx_index;
+	unsigned int rx_index;
+
+	struct ntb_transport_qp *qp;
+	union {
+		struct ntb_payload_header __iomem *tx_hdr;
+		struct ntb_payload_header *rx_hdr;
+	};
 };
 
 struct ntb_rx_info {
@@ -83,39 +130,50 @@ struct
ntb_rx_info { }; struct ntb_transport_qp { - struct ntb_transport *transport; - struct ntb_device *ndev; + struct ntb_transport_ctx *transport; + struct ntb_dev *ndev; void *cb_data; + struct dma_chan *tx_dma_chan; + struct dma_chan *rx_dma_chan; bool client_ready; - bool qp_link; + bool link_is_up; + bool active; + u8 qp_num; /* Only 64 QP's are allowed. 0-63 */ + u64 qp_bit; struct ntb_rx_info __iomem *rx_info; struct ntb_rx_info *remote_rx_info; - void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data, - void *data, int len); + void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, + void *data, int len); struct list_head tx_free_q; spinlock_t ntb_tx_free_q_lock; void __iomem *tx_mw; + phys_addr_t tx_mw_phys; + size_t tx_mw_size; + dma_addr_t tx_mw_dma_addr; unsigned int tx_index; unsigned int tx_max_entry; unsigned int tx_max_frame; - void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data, - void *data, int len); - struct tasklet_struct rx_work; + void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, + void *data, int len); + struct list_head rx_post_q; struct list_head rx_pend_q; struct list_head rx_free_q; - spinlock_t ntb_rx_pend_q_lock; - spinlock_t ntb_rx_free_q_lock; + /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ + spinlock_t ntb_rx_q_lock; void *rx_buff; unsigned int rx_index; unsigned int rx_max_entry; unsigned int rx_max_frame; + unsigned int rx_alloc_entry; + dma_cookie_t last_cookie; + struct tasklet_struct rxc_db_work; - void (*event_handler) (void *data, int status); + void (*event_handler)(void *data, int status); struct delayed_work link_work; struct work_struct link_cleanup; @@ -129,40 +187,69 @@ struct ntb_transport_qp { u64 rx_err_no_buf; u64 rx_err_oflow; u64 rx_err_ver; + u64 rx_memcpy; + u64 rx_async; u64 tx_bytes; u64 tx_pkts; u64 tx_ring_full; + u64 tx_err_no_buf; + u64 tx_memcpy; + u64 tx_async; + + bool use_msi; + int msi_irq; + struct ntb_msi_desc msi_desc; + struct ntb_msi_desc peer_msi_desc; }; struct ntb_transport_mw { - size_t size; + phys_addr_t phys_addr; + resource_size_t phys_size; + void __iomem *vbase; + size_t xlat_size; + size_t buff_size; + size_t alloc_size; + void *alloc_addr; void *virt_addr; dma_addr_t dma_addr; }; struct ntb_transport_client_dev { struct list_head entry; + struct ntb_transport_ctx *nt; struct device dev; }; -struct ntb_transport { +struct ntb_transport_ctx { struct list_head entry; struct list_head client_devs; - struct ntb_device *ndev; - struct ntb_transport_mw mw[NTB_NUM_MW]; - struct ntb_transport_qp *qps; - unsigned int max_qps; - unsigned long qp_bitmap; - bool transport_link; + struct ntb_dev *ndev; + + struct ntb_transport_mw *mw_vec; + struct ntb_transport_qp *qp_vec; + unsigned int mw_count; + unsigned int qp_count; + u64 qp_bitmap; + u64 qp_bitmap_free; + + bool use_msi; + unsigned int msi_spad_offset; + u64 msi_db_mask; + + bool link_is_up; struct delayed_work link_work; struct work_struct link_cleanup; - struct dentry *debugfs_dir; + + struct dentry *debugfs_node_dir; + + /* Make sure workq of link event be executed serially */ + struct mutex link_event_lock; }; enum { - DESC_DONE_FLAG = 1 << 0, - LINK_DOWN_FLAG = 1 << 1, + DESC_DONE_FLAG = BIT(0), + LINK_DOWN_FLAG = BIT(1), }; struct ntb_payload_header { @@ -178,73 +265,75 @@ enum { NUM_MWS, MW0_SZ_HIGH, MW0_SZ_LOW, - MW1_SZ_HIGH, - MW1_SZ_LOW, - MAX_SPAD, }; -#define QP_TO_MW(qp) ((qp) % NTB_NUM_MW) +#define dev_client_dev(__dev) \ + container_of((__dev), struct ntb_transport_client_dev, dev) + +#define drv_client(__drv) 
\ + container_of((__drv), struct ntb_transport_client, driver) + +#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 10 -static int ntb_match_bus(struct device *dev, struct device_driver *drv) +static void ntb_transport_rxc_db(unsigned long data); +static const struct ntb_ctx_ops ntb_transport_ops; +static struct ntb_client ntb_transport_client; +static int ntb_async_tx_submit(struct ntb_transport_qp *qp, + struct ntb_queue_entry *entry); +static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset); +static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset); +static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset); + + +static int ntb_transport_bus_match(struct device *dev, + const struct device_driver *drv) { return !strncmp(dev_name(dev), drv->name, strlen(drv->name)); } -static int ntb_client_probe(struct device *dev) +static int ntb_transport_bus_probe(struct device *dev) { - const struct ntb_client *drv = container_of(dev->driver, - struct ntb_client, driver); - struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev); - int rc = -EINVAL; + const struct ntb_transport_client *client; + int rc; get_device(dev); - if (drv && drv->probe) - rc = drv->probe(pdev); + + client = drv_client(dev->driver); + rc = client->probe(dev); if (rc) put_device(dev); return rc; } -static int ntb_client_remove(struct device *dev) +static void ntb_transport_bus_remove(struct device *dev) { - const struct ntb_client *drv = container_of(dev->driver, - struct ntb_client, driver); - struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev); + const struct ntb_transport_client *client; - if (drv && drv->remove) - drv->remove(pdev); + client = drv_client(dev->driver); + client->remove(dev); put_device(dev); - - return 0; } -static struct bus_type ntb_bus_type = { - .name = "ntb_bus", - .match = ntb_match_bus, - .probe = ntb_client_probe, - .remove = ntb_client_remove, +static const struct bus_type ntb_transport_bus = { + .name = "ntb_transport", + .match = ntb_transport_bus_match, + .probe = ntb_transport_bus_probe, + .remove = ntb_transport_bus_remove, }; static LIST_HEAD(ntb_transport_list); -static int ntb_bus_init(struct ntb_transport *nt) +static int ntb_bus_init(struct ntb_transport_ctx *nt) { - if (list_empty(&ntb_transport_list)) { - int rc = bus_register(&ntb_bus_type); - if (rc) - return rc; - } - - list_add(&nt->entry, &ntb_transport_list); - + list_add_tail(&nt->entry, &ntb_transport_list); return 0; } -static void ntb_bus_remove(struct ntb_transport *nt) +static void ntb_bus_remove(struct ntb_transport_ctx *nt) { struct ntb_transport_client_dev *client_dev, *cd; @@ -256,29 +345,26 @@ static void ntb_bus_remove(struct ntb_transport *nt) } list_del(&nt->entry); - - if (list_empty(&ntb_transport_list)) - bus_unregister(&ntb_bus_type); } -static void ntb_client_release(struct device *dev) +static void ntb_transport_client_release(struct device *dev) { struct ntb_transport_client_dev *client_dev; - client_dev = container_of(dev, struct ntb_transport_client_dev, dev); + client_dev = dev_client_dev(dev); kfree(client_dev); } /** - * ntb_unregister_client_dev - Unregister NTB client device + * ntb_transport_unregister_client_dev - Unregister NTB client device * @device_name: Name of NTB client device * * Unregister an NTB client device with the NTB transport layer */ -void ntb_unregister_client_dev(char *device_name) +void ntb_transport_unregister_client_dev(char 
*device_name) { struct ntb_transport_client_dev *client, *cd; - struct ntb_transport *nt; + struct ntb_transport_ctx *nt; list_for_each_entry(nt, &ntb_transport_list, entry) list_for_each_entry_safe(client, cd, &nt->client_devs, entry) @@ -288,18 +374,21 @@ void ntb_unregister_client_dev(char *device_name) device_unregister(&client->dev); } } -EXPORT_SYMBOL_GPL(ntb_unregister_client_dev); +EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev); /** - * ntb_register_client_dev - Register NTB client device + * ntb_transport_register_client_dev - Register NTB client device * @device_name: Name of NTB client device * * Register an NTB client device with the NTB transport layer + * + * Returns: %0 on success or -errno code on error */ -int ntb_register_client_dev(char *device_name) +int ntb_transport_register_client_dev(char *device_name) { struct ntb_transport_client_dev *client_dev; - struct ntb_transport *nt; + struct ntb_transport_ctx *nt; + int node; int rc, i = 0; if (list_empty(&ntb_transport_list)) @@ -308,8 +397,10 @@ int ntb_register_client_dev(char *device_name) list_for_each_entry(nt, &ntb_transport_list, entry) { struct device *dev; - client_dev = kzalloc(sizeof(struct ntb_transport_client_dev), - GFP_KERNEL); + node = dev_to_node(&nt->ndev->dev); + + client_dev = kzalloc_node(sizeof(*client_dev), + GFP_KERNEL, node); if (!client_dev) { rc = -ENOMEM; goto err; @@ -319,13 +410,13 @@ int ntb_register_client_dev(char *device_name) /* setup and register client devices */ dev_set_name(dev, "%s%d", device_name, i); - dev->bus = &ntb_bus_type; - dev->release = ntb_client_release; - dev->parent = &ntb_query_pdev(nt->ndev)->dev; + dev->bus = &ntb_transport_bus; + dev->release = ntb_transport_client_release; + dev->parent = &nt->ndev->dev; rc = device_register(dev); if (rc) { - kfree(client_dev); + put_device(dev); goto err; } @@ -336,44 +427,44 @@ int ntb_register_client_dev(char *device_name) return 0; err: - ntb_unregister_client_dev(device_name); + ntb_transport_unregister_client_dev(device_name); return rc; } -EXPORT_SYMBOL_GPL(ntb_register_client_dev); +EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev); /** - * ntb_register_client - Register NTB client driver + * ntb_transport_register_client - Register NTB client driver * @drv: NTB client driver to be registered * * Register an NTB client driver with the NTB transport layer * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ -int ntb_register_client(struct ntb_client *drv) +int ntb_transport_register_client(struct ntb_transport_client *drv) { - drv->driver.bus = &ntb_bus_type; + drv->driver.bus = &ntb_transport_bus; if (list_empty(&ntb_transport_list)) return -ENODEV; return driver_register(&drv->driver); } -EXPORT_SYMBOL_GPL(ntb_register_client); +EXPORT_SYMBOL_GPL(ntb_transport_register_client); /** - * ntb_unregister_client - Unregister NTB client driver + * ntb_transport_unregister_client - Unregister NTB client driver * @drv: NTB client driver to be unregistered * * Unregister an NTB client driver with the NTB transport layer * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
*/ -void ntb_unregister_client(struct ntb_client *drv) +void ntb_transport_unregister_client(struct ntb_transport_client *drv) { driver_unregister(&drv->driver); } -EXPORT_SYMBOL_GPL(ntb_unregister_client); +EXPORT_SYMBOL_GPL(ntb_transport_unregister_client); static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) @@ -382,51 +473,84 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count, char *buf; ssize_t ret, out_offset, out_count; - out_count = 600; + qp = filp->private_data; + + if (!qp || !qp->link_is_up) + return 0; + + out_count = 1000; buf = kmalloc(out_count, GFP_KERNEL); if (!buf) return -ENOMEM; - qp = filp->private_data; out_offset = 0; - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "NTB QP stats\n"); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "\nNTB QP stats:\n\n"); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_bytes - \t%llu\n", qp->rx_bytes); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_pkts - \t%llu\n", qp->rx_pkts); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "rx_memcpy - \t%llu\n", qp->rx_memcpy); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "rx_async - \t%llu\n", qp->rx_async); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_ring_empty - %llu\n", qp->rx_ring_empty); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_err_no_buf - %llu\n", qp->rx_err_no_buf); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_err_oflow - \t%llu\n", qp->rx_err_oflow); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_err_ver - \t%llu\n", qp->rx_err_ver); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "rx_buff - \t%p\n", qp->rx_buff); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "rx_buff - \t0x%p\n", qp->rx_buff); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_index - \t%u\n", qp->rx_index); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "rx_max_entry - \t%u\n", qp->rx_max_entry); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "tx_bytes - \t%llu\n", qp->tx_bytes); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "tx_pkts - \t%llu\n", qp->tx_pkts); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "tx_memcpy - \t%llu\n", qp->tx_memcpy); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "tx_async - \t%llu\n", qp->tx_async); + out_offset += scnprintf(buf + out_offset, out_count - 
out_offset, "tx_ring_full - \t%llu\n", qp->tx_ring_full); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "tx_mw - \t%p\n", qp->tx_mw); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "tx_index - \t%u\n", qp->tx_index); - out_offset += snprintf(buf + out_offset, out_count - out_offset, + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "tx_err_no_buf - %llu\n", qp->tx_err_no_buf); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "tx_mw - \t0x%p\n", qp->tx_mw); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "tx_index (H) - \t%u\n", qp->tx_index); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "RRI (T) - \t%u\n", + qp->remote_rx_info->entry); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, "tx_max_entry - \t%u\n", qp->tx_max_entry); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "free tx - \t%u\n", + ntb_transport_tx_free_entry(qp)); + + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "\n"); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "Using TX DMA - \t%s\n", + qp->tx_dma_chan ? "Yes" : "No"); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "Using RX DMA - \t%s\n", + qp->rx_dma_chan ? "Yes" : "No"); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "QP Link - \t%s\n", + qp->link_is_up ? "Up" : "Down"); + out_offset += scnprintf(buf + out_offset, out_count - out_offset, + "\n"); - out_offset += snprintf(buf + out_offset, out_count - out_offset, - "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ? - "Up" : "Down"); if (out_offset > out_count) out_offset = out_count; @@ -452,7 +576,7 @@ static void ntb_list_add(spinlock_t *lock, struct list_head *entry, } static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, - struct list_head *list) + struct list_head *list) { struct ntb_queue_entry *entry; unsigned long flags; @@ -464,114 +588,400 @@ static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock, } entry = list_first_entry(list, struct ntb_queue_entry, entry); list_del(&entry->entry); + out: spin_unlock_irqrestore(lock, flags); return entry; } -static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, - unsigned int qp_num) +static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock, + struct list_head *list, + struct list_head *to_list) { - struct ntb_transport_qp *qp = &nt->qps[qp_num]; + struct ntb_queue_entry *entry; + unsigned long flags; + + spin_lock_irqsave(lock, flags); + + if (list_empty(list)) { + entry = NULL; + } else { + entry = list_first_entry(list, struct ntb_queue_entry, entry); + list_move_tail(&entry->entry, to_list); + } + + spin_unlock_irqrestore(lock, flags); + + return entry; +} + +static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, + unsigned int qp_num) +{ + struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; + struct ntb_transport_mw *mw; + struct ntb_dev *ndev = nt->ndev; + struct ntb_queue_entry *entry; unsigned int rx_size, num_qps_mw; - u8 mw_num = QP_TO_MW(qp_num); + unsigned int mw_num, mw_count, qp_count; unsigned int i; + int node; - WARN_ON(nt->mw[mw_num].virt_addr == NULL); + mw_count = nt->mw_count; + qp_count = nt->qp_count; + + mw_num = QP_TO_MW(nt, qp_num); + mw = &nt->mw_vec[mw_num]; + + if (!mw->virt_addr) + return -ENOMEM; - if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) - num_qps_mw = nt->max_qps / NTB_NUM_MW + 1; + if (mw_num < qp_count % mw_count) + 
num_qps_mw = qp_count / mw_count + 1; else - num_qps_mw = nt->max_qps / NTB_NUM_MW; + num_qps_mw = qp_count / mw_count; - rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw; - qp->remote_rx_info = nt->mw[mw_num].virt_addr + - (qp_num / NTB_NUM_MW * rx_size); + rx_size = (unsigned int)mw->xlat_size / num_qps_mw; + qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); rx_size -= sizeof(struct ntb_rx_info); - qp->rx_buff = qp->remote_rx_info + 1; + qp->remote_rx_info = qp->rx_buff + rx_size; + /* Due to housekeeping, there must be atleast 2 buffs */ qp->rx_max_frame = min(transport_mtu, rx_size / 2); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; + /* + * Checking to see if we have more entries than the default. + * We should add additional entries if that is the case so we + * can be in sync with the transport frames. + */ + node = dev_to_node(&ndev->dev); + for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) { + entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); + if (!entry) + return -ENOMEM; + + entry->qp = qp; + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, + &qp->rx_free_q); + qp->rx_alloc_entry++; + } + qp->remote_rx_info->entry = qp->rx_max_entry - 1; /* setup the hdr offsets with 0's */ for (i = 0; i < qp->rx_max_entry; i++) { - void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) - - sizeof(struct ntb_payload_header); + void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) - + sizeof(struct ntb_payload_header)); memset(offset, 0, sizeof(struct ntb_payload_header)); } qp->rx_pkts = 0; qp->tx_pkts = 0; qp->tx_index = 0; + + return 0; +} + +static irqreturn_t ntb_transport_isr(int irq, void *dev) +{ + struct ntb_transport_qp *qp = dev; + + tasklet_schedule(&qp->rxc_db_work); + + return IRQ_HANDLED; +} + +static void ntb_transport_setup_qp_peer_msi(struct ntb_transport_ctx *nt, + unsigned int qp_num) +{ + struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; + int spad = qp_num * 2 + nt->msi_spad_offset; + + if (!nt->use_msi) + return; + + if (spad >= ntb_spad_count(nt->ndev)) + return; + + qp->peer_msi_desc.addr_offset = + ntb_peer_spad_read(qp->ndev, PIDX, spad); + qp->peer_msi_desc.data = + ntb_peer_spad_read(qp->ndev, PIDX, spad + 1); + + dev_dbg(&qp->ndev->pdev->dev, "QP%d Peer MSI addr=%x data=%x\n", + qp_num, qp->peer_msi_desc.addr_offset, qp->peer_msi_desc.data); + + if (qp->peer_msi_desc.addr_offset) { + qp->use_msi = true; + dev_info(&qp->ndev->pdev->dev, + "Using MSI interrupts for QP%d\n", qp_num); + } +} + +static void ntb_transport_setup_qp_msi(struct ntb_transport_ctx *nt, + unsigned int qp_num) +{ + struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; + int spad = qp_num * 2 + nt->msi_spad_offset; + int rc; + + if (!nt->use_msi) + return; + + if (spad >= ntb_spad_count(nt->ndev)) { + dev_warn_once(&qp->ndev->pdev->dev, + "Not enough SPADS to use MSI interrupts\n"); + return; + } + + ntb_spad_write(qp->ndev, spad, 0); + ntb_spad_write(qp->ndev, spad + 1, 0); + + if (!qp->msi_irq) { + qp->msi_irq = ntbm_msi_request_irq(qp->ndev, ntb_transport_isr, + KBUILD_MODNAME, qp, + &qp->msi_desc); + if (qp->msi_irq < 0) { + dev_warn(&qp->ndev->pdev->dev, + "Unable to allocate MSI interrupt for qp%d\n", + qp_num); + return; + } + } + + rc = ntb_spad_write(qp->ndev, spad, qp->msi_desc.addr_offset); + if (rc) + goto err_free_interrupt; + + rc = ntb_spad_write(qp->ndev, spad + 1, qp->msi_desc.data); + if (rc) + goto err_free_interrupt; + + dev_dbg(&qp->ndev->pdev->dev, "QP%d MSI %d addr=%x data=%x\n", + qp_num, qp->msi_irq, 
qp->msi_desc.addr_offset, + qp->msi_desc.data); + + return; + +err_free_interrupt: + devm_free_irq(&nt->ndev->dev, qp->msi_irq, qp); +} + +static void ntb_transport_msi_peer_desc_changed(struct ntb_transport_ctx *nt) +{ + int i; + + dev_dbg(&nt->ndev->pdev->dev, "Peer MSI descriptors changed"); + + for (i = 0; i < nt->qp_count; i++) + ntb_transport_setup_qp_peer_msi(nt, i); +} + +static void ntb_transport_msi_desc_changed(void *data) +{ + struct ntb_transport_ctx *nt = data; + int i; + + dev_dbg(&nt->ndev->pdev->dev, "MSI descriptors changed"); + + for (i = 0; i < nt->qp_count; i++) + ntb_transport_setup_qp_msi(nt, i); + + ntb_peer_db_set(nt->ndev, nt->msi_db_mask); } -static void ntb_free_mw(struct ntb_transport *nt, int num_mw) +static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) { - struct ntb_transport_mw *mw = &nt->mw[num_mw]; - struct pci_dev *pdev = ntb_query_pdev(nt->ndev); + struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; + struct pci_dev *pdev = nt->ndev->pdev; if (!mw->virt_addr) return; - dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr); + ntb_mw_clear_trans(nt->ndev, PIDX, num_mw); + dma_free_coherent(&pdev->dev, mw->alloc_size, + mw->alloc_addr, mw->dma_addr); + mw->xlat_size = 0; + mw->buff_size = 0; + mw->alloc_size = 0; + mw->alloc_addr = NULL; mw->virt_addr = NULL; } -static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) +static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw, + struct device *ntb_dev, size_t align) { - struct ntb_transport_mw *mw = &nt->mw[num_mw]; - struct pci_dev *pdev = ntb_query_pdev(nt->ndev); + dma_addr_t dma_addr; + void *alloc_addr, *virt_addr; + int rc; + + /* + * The buffer here is allocated against the NTB device. The reason to + * use dma_alloc_*() call is to allocate a large IOVA contiguous buffer + * backing the NTB BAR for the remote host to write to. During receive + * processing, the data is being copied out of the receive buffer to + * the kernel skbuff. When a DMA device is being used, dma_map_page() + * is called on the kvaddr of the receive buffer (from dma_alloc_*()) + * and remapped against the DMA device. It appears to be a double + * DMA mapping of buffers, but first is mapped to the NTB device and + * second is to the DMA device. DMA_ATTR_FORCE_CONTIGUOUS is necessary + * in order for the later dma_map_page() to not fail. + */ + alloc_addr = dma_alloc_attrs(ntb_dev, mw->alloc_size, + &dma_addr, GFP_KERNEL, + DMA_ATTR_FORCE_CONTIGUOUS); + if (!alloc_addr) { + dev_err(ntb_dev, "Unable to alloc MW buff of size %zu\n", + mw->alloc_size); + return -ENOMEM; + } + virt_addr = alloc_addr; + + /* + * we must ensure that the memory address allocated is BAR size + * aligned in order for the XLAT register to take the value. This + * is a requirement of the hardware. It is recommended to setup CMA + * for BAR sizes equal or greater than 4MB. 
+ */ + if (!IS_ALIGNED(dma_addr, align)) { + if (mw->alloc_size > mw->buff_size) { + virt_addr = PTR_ALIGN(alloc_addr, align); + dma_addr = ALIGN(dma_addr, align); + } else { + rc = -ENOMEM; + goto err; + } + } + + mw->alloc_addr = alloc_addr; + mw->virt_addr = virt_addr; + mw->dma_addr = dma_addr; + + return 0; + +err: + dma_free_coherent(ntb_dev, mw->alloc_size, alloc_addr, dma_addr); + + return rc; +} + +static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, + resource_size_t size) +{ + struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; + struct pci_dev *pdev = nt->ndev->pdev; + size_t xlat_size, buff_size; + resource_size_t xlat_align; + resource_size_t xlat_align_size; + int rc; + + if (!size) + return -EINVAL; + + rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align, + &xlat_align_size, NULL); + if (rc) + return rc; + + xlat_size = round_up(size, xlat_align_size); + buff_size = round_up(size, xlat_align); /* No need to re-setup */ - if (mw->size == ALIGN(size, 4096)) + if (mw->xlat_size == xlat_size) return 0; - if (mw->size != 0) + if (mw->buff_size) ntb_free_mw(nt, num_mw); - /* Alloc memory for receiving data. Must be 4k aligned */ - mw->size = ALIGN(size, 4096); + /* Alloc memory for receiving data. Must be aligned */ + mw->xlat_size = xlat_size; + mw->buff_size = buff_size; + mw->alloc_size = buff_size; - mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, - GFP_KERNEL); - if (!mw->virt_addr) { - mw->size = 0; - dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", - (int) mw->size); - return -ENOMEM; + rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); + if (rc) { + mw->alloc_size *= 2; + rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align); + if (rc) { + dev_err(&pdev->dev, + "Unable to alloc aligned MW buff\n"); + mw->xlat_size = 0; + mw->buff_size = 0; + mw->alloc_size = 0; + return rc; + } } /* Notify HW the memory location of the receive buffer */ - ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr); + rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr, + mw->xlat_size); + if (rc) { + dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw); + ntb_free_mw(nt, num_mw); + return -EIO; + } return 0; } -static void ntb_qp_link_cleanup(struct work_struct *work) +static void ntb_qp_link_context_reset(struct ntb_transport_qp *qp) { - struct ntb_transport_qp *qp = container_of(work, - struct ntb_transport_qp, - link_cleanup); - struct ntb_transport *nt = qp->transport; - struct pci_dev *pdev = ntb_query_pdev(nt->ndev); + qp->link_is_up = false; + qp->active = false; - if (qp->qp_link == NTB_LINK_DOWN) { - cancel_delayed_work_sync(&qp->link_work); - return; - } + qp->tx_index = 0; + qp->rx_index = 0; + qp->rx_bytes = 0; + qp->rx_pkts = 0; + qp->rx_ring_empty = 0; + qp->rx_err_no_buf = 0; + qp->rx_err_oflow = 0; + qp->rx_err_ver = 0; + qp->rx_memcpy = 0; + qp->rx_async = 0; + qp->tx_bytes = 0; + qp->tx_pkts = 0; + qp->tx_ring_full = 0; + qp->tx_err_no_buf = 0; + qp->tx_memcpy = 0; + qp->tx_async = 0; +} + +static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) +{ + ntb_qp_link_context_reset(qp); + if (qp->remote_rx_info) + qp->remote_rx_info->entry = qp->rx_max_entry - 1; +} + +static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) +{ + struct ntb_transport_ctx *nt = qp->transport; + struct pci_dev *pdev = nt->ndev->pdev; + + dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num); + + cancel_delayed_work_sync(&qp->link_work); + ntb_qp_link_down_reset(qp); if (qp->event_handler) - 
qp->event_handler(qp->cb_data, NTB_LINK_DOWN); + qp->event_handler(qp->cb_data, qp->link_is_up); +} - dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num); - qp->qp_link = NTB_LINK_DOWN; +static void ntb_qp_link_cleanup_work(struct work_struct *work) +{ + struct ntb_transport_qp *qp = container_of(work, + struct ntb_transport_qp, + link_cleanup); + struct ntb_transport_ctx *nt = qp->transport; + + ntb_qp_link_cleanup(qp); - if (nt->transport_link == NTB_LINK_UP) + if (nt->link_is_up) schedule_delayed_work(&qp->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); } @@ -581,174 +991,159 @@ static void ntb_qp_link_down(struct ntb_transport_qp *qp) schedule_work(&qp->link_cleanup); } -static void ntb_transport_link_cleanup(struct work_struct *work) +static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) { - struct ntb_transport *nt = container_of(work, struct ntb_transport, - link_cleanup); - int i; + struct ntb_transport_qp *qp; + u64 qp_bitmap_alloc; + unsigned int i, count; - if (nt->transport_link == NTB_LINK_DOWN) - cancel_delayed_work_sync(&nt->link_work); - else - nt->transport_link = NTB_LINK_DOWN; + qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free; /* Pass along the info to any clients */ - for (i = 0; i < nt->max_qps; i++) - if (!test_bit(i, &nt->qp_bitmap)) - ntb_qp_link_down(&nt->qps[i]); + for (i = 0; i < nt->qp_count; i++) + if (qp_bitmap_alloc & BIT_ULL(i)) { + qp = &nt->qp_vec[i]; + ntb_qp_link_cleanup(qp); + cancel_work_sync(&qp->link_cleanup); + cancel_delayed_work_sync(&qp->link_work); + } + + if (!nt->link_is_up) + cancel_delayed_work_sync(&nt->link_work); + + for (i = 0; i < nt->mw_count; i++) + ntb_free_mw(nt, i); /* The scratchpad registers keep the values if the remote side * goes down, blast them now to give them a sane value the next * time they are accessed */ - for (i = 0; i < MAX_SPAD; i++) - ntb_write_local_spad(nt->ndev, i, 0); + count = ntb_spad_count(nt->ndev); + for (i = 0; i < count; i++) + ntb_spad_write(nt->ndev, i, 0); } -static void ntb_transport_event_callback(void *data, enum ntb_hw_event event) +static void ntb_transport_link_cleanup_work(struct work_struct *work) { - struct ntb_transport *nt = data; + struct ntb_transport_ctx *nt = + container_of(work, struct ntb_transport_ctx, link_cleanup); + + guard(mutex)(&nt->link_event_lock); + ntb_transport_link_cleanup(nt); +} - switch (event) { - case NTB_EVENT_HW_LINK_UP: +static void ntb_transport_event_callback(void *data) +{ + struct ntb_transport_ctx *nt = data; + + if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1) schedule_delayed_work(&nt->link_work, 0); - break; - case NTB_EVENT_HW_LINK_DOWN: + else schedule_work(&nt->link_cleanup); - break; - default: - BUG(); - } } static void ntb_transport_link_work(struct work_struct *work) { - struct ntb_transport *nt = container_of(work, struct ntb_transport, - link_work.work); - struct ntb_device *ndev = nt->ndev; - struct pci_dev *pdev = ntb_query_pdev(ndev); + struct ntb_transport_ctx *nt = + container_of(work, struct ntb_transport_ctx, link_work.work); + struct ntb_dev *ndev = nt->ndev; + struct pci_dev *pdev = ndev->pdev; + resource_size_t size; u32 val; - int rc, i; + int rc = 0, i, spad; + + guard(mutex)(&nt->link_event_lock); /* send the local info, in the opposite order of the way we read it */ - for (i = 0; i < NTB_NUM_MW; i++) { - rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), - ntb_get_mw_size(ndev, i) >> 32); - if (rc) { - dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", - (u32)(ntb_get_mw_size(ndev, i) >> 32), 
- MW0_SZ_HIGH + (i * 2)); - goto out; - } - rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2), - (u32) ntb_get_mw_size(ndev, i)); + if (nt->use_msi) { + rc = ntb_msi_setup_mws(ndev); if (rc) { - dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", - (u32) ntb_get_mw_size(ndev, i), - MW0_SZ_LOW + (i * 2)); - goto out; + dev_warn(&pdev->dev, + "Failed to register MSI memory window: %d\n", + rc); + nt->use_msi = false; } } - rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW); - if (rc) { - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - NTB_NUM_MW, NUM_MWS); - goto out; - } + for (i = 0; i < nt->qp_count; i++) + ntb_transport_setup_qp_msi(nt, i); - rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps); - if (rc) { - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - nt->max_qps, NUM_QPS); - goto out; - } + for (i = 0; i < nt->mw_count; i++) { + size = nt->mw_vec[i].phys_size; - rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); - if (rc) { - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - NTB_TRANSPORT_VERSION, VERSION); - goto out; - } + if (max_mw_size && size > max_mw_size) + size = max_mw_size; - /* Query the remote side for its info */ - rc = ntb_read_remote_spad(ndev, VERSION, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION); - goto out; + spad = MW0_SZ_HIGH + (i * 2); + ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size)); + + spad = MW0_SZ_LOW + (i * 2); + ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size)); } - if (val != NTB_TRANSPORT_VERSION) - goto out; - dev_dbg(&pdev->dev, "Remote version = %d\n", val); + ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count); - rc = ntb_read_remote_spad(ndev, NUM_QPS, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS); - goto out; - } + ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count); - if (val != nt->max_qps) - goto out; - dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); + ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION); - rc = ntb_read_remote_spad(ndev, NUM_MWS, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS); + /* Query the remote side for its info */ + val = ntb_spad_read(ndev, VERSION); + dev_dbg(&pdev->dev, "Remote version = %d\n", val); + if (val != NTB_TRANSPORT_VERSION) goto out; - } - if (val != NTB_NUM_MW) + val = ntb_spad_read(ndev, NUM_QPS); + dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); + if (val != nt->qp_count) goto out; + + val = ntb_spad_read(ndev, NUM_MWS); dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); + if (val != nt->mw_count) + goto out; - for (i = 0; i < NTB_NUM_MW; i++) { + for (i = 0; i < nt->mw_count; i++) { u64 val64; - rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", - MW0_SZ_HIGH + (i * 2)); - goto out1; - } - - val64 = (u64) val << 32; - - rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", - MW0_SZ_LOW + (i * 2)); - goto out1; - } + val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2)); + val64 = (u64)val << 32; + val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2)); val64 |= val; - dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64); + dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64); rc = ntb_set_mw(nt, i, val64); if (rc) goto out1; } - nt->transport_link = NTB_LINK_UP; + nt->link_is_up = true; - for (i = 
0; i < nt->max_qps; i++) { - struct ntb_transport_qp *qp = &nt->qps[i]; + for (i = 0; i < nt->qp_count; i++) { + struct ntb_transport_qp *qp = &nt->qp_vec[i]; ntb_transport_setup_qp_mw(nt, i); + ntb_transport_setup_qp_peer_msi(nt, i); - if (qp->client_ready == NTB_LINK_UP) + if (qp->client_ready) schedule_delayed_work(&qp->link_work, 0); } return; out1: - for (i = 0; i < NTB_NUM_MW; i++) + for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); + + /* if there's an actual failure, we should just bail */ + if (rc < 0) + return; + out: - if (ntb_hw_link_status(ndev)) + if (ntb_link_is_up(ndev, NULL, NULL) == 1) schedule_delayed_work(&nt->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); } @@ -758,135 +1153,251 @@ static void ntb_qp_link_work(struct work_struct *work) struct ntb_transport_qp *qp = container_of(work, struct ntb_transport_qp, link_work.work); - struct pci_dev *pdev = ntb_query_pdev(qp->ndev); - struct ntb_transport *nt = qp->transport; - int rc, val; + struct pci_dev *pdev = qp->ndev->pdev; + struct ntb_transport_ctx *nt = qp->transport; + int val; - WARN_ON(nt->transport_link != NTB_LINK_UP); + WARN_ON(!nt->link_is_up); - rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS); - return; - } + val = ntb_spad_read(nt->ndev, QP_LINKS); - rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num); - if (rc) - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - val | 1 << qp->qp_num, QP_LINKS); + ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num)); /* query remote spad for qp ready bits */ - rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val); - if (rc) - dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS); - - dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val); + dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val); /* See if the remote side is up */ - if (1 << qp->qp_num & val) { - qp->qp_link = NTB_LINK_UP; - + if (val & BIT(qp->qp_num)) { dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num); + qp->link_is_up = true; + qp->active = true; + if (qp->event_handler) - qp->event_handler(qp->cb_data, NTB_LINK_UP); - } else if (nt->transport_link == NTB_LINK_UP) + qp->event_handler(qp->cb_data, qp->link_is_up); + + if (qp->active) + tasklet_schedule(&qp->rxc_db_work); + } else if (nt->link_is_up) schedule_delayed_work(&qp->link_work, msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); } -static void ntb_transport_init_queue(struct ntb_transport *nt, - unsigned int qp_num) +static int ntb_transport_init_queue(struct ntb_transport_ctx *nt, + unsigned int qp_num) { struct ntb_transport_qp *qp; + phys_addr_t mw_base; + resource_size_t mw_size; unsigned int num_qps_mw, tx_size; - u8 mw_num = QP_TO_MW(qp_num); + unsigned int mw_num, mw_count, qp_count; + u64 qp_offset; + + mw_count = nt->mw_count; + qp_count = nt->qp_count; - qp = &nt->qps[qp_num]; + mw_num = QP_TO_MW(nt, qp_num); + + qp = &nt->qp_vec[qp_num]; qp->qp_num = qp_num; qp->transport = nt; qp->ndev = nt->ndev; - qp->qp_link = NTB_LINK_DOWN; - qp->client_ready = NTB_LINK_DOWN; + qp->client_ready = false; qp->event_handler = NULL; + ntb_qp_link_context_reset(qp); - if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW) - num_qps_mw = nt->max_qps / NTB_NUM_MW + 1; + if (mw_num < qp_count % mw_count) + num_qps_mw = qp_count / mw_count + 1; else - num_qps_mw = nt->max_qps / NTB_NUM_MW; + num_qps_mw = qp_count / mw_count; + + mw_base = nt->mw_vec[mw_num].phys_addr; + mw_size = 
nt->mw_vec[mw_num].phys_size; + + if (max_mw_size && mw_size > max_mw_size) + mw_size = max_mw_size; + + tx_size = (unsigned int)mw_size / num_qps_mw; + qp_offset = tx_size * (qp_num / mw_count); + + qp->tx_mw_size = tx_size; + qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; + if (!qp->tx_mw) + return -EINVAL; + + qp->tx_mw_phys = mw_base + qp_offset; + if (!qp->tx_mw_phys) + return -EINVAL; - tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw; - qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) + - (qp_num / NTB_NUM_MW * tx_size); tx_size -= sizeof(struct ntb_rx_info); + qp->rx_info = qp->tx_mw + tx_size; - qp->tx_mw = qp->rx_info + 1; /* Due to housekeeping, there must be atleast 2 buffs */ qp->tx_max_frame = min(transport_mtu, tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; - if (nt->debugfs_dir) { + if (nt->debugfs_node_dir) { char debugfs_name[4]; snprintf(debugfs_name, 4, "qp%d", qp_num); qp->debugfs_dir = debugfs_create_dir(debugfs_name, - nt->debugfs_dir); + nt->debugfs_node_dir); qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, qp->debugfs_dir, qp, &ntb_qp_debugfs_stats); + } else { + qp->debugfs_dir = NULL; + qp->debugfs_stats = NULL; } INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); - INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup); + INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); - spin_lock_init(&qp->ntb_rx_pend_q_lock); - spin_lock_init(&qp->ntb_rx_free_q_lock); + spin_lock_init(&qp->ntb_rx_q_lock); spin_lock_init(&qp->ntb_tx_free_q_lock); + INIT_LIST_HEAD(&qp->rx_post_q); INIT_LIST_HEAD(&qp->rx_pend_q); INIT_LIST_HEAD(&qp->rx_free_q); INIT_LIST_HEAD(&qp->tx_free_q); + + tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db, + (unsigned long)qp); + + return 0; } -int ntb_transport_init(struct pci_dev *pdev) +static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) { - struct ntb_transport *nt; + struct ntb_transport_ctx *nt; + struct ntb_transport_mw *mw; + unsigned int mw_count, qp_count, spad_count, max_mw_count_for_spads; + u64 qp_bitmap; + int node; int rc, i; - nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL); + mw_count = ntb_peer_mw_count(ndev); + + if (!ndev->ops->mw_set_trans) { + dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); + return -EINVAL; + } + + if (ntb_db_is_unsafe(ndev)) + dev_dbg(&ndev->dev, + "doorbell is unsafe, proceed anyway...\n"); + if (ntb_spad_is_unsafe(ndev)) + dev_dbg(&ndev->dev, + "scratchpad is unsafe, proceed anyway...\n"); + + if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT) + dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n"); + + node = dev_to_node(&ndev->dev); + + nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node); if (!nt) return -ENOMEM; - if (debugfs_initialized()) - nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); - else - nt->debugfs_dir = NULL; + nt->ndev = ndev; + + /* + * If we are using MSI, and have at least one extra memory window, + * we will reserve the last MW for the MSI window. 
+ */ + if (use_msi && mw_count > 1) { + rc = ntb_msi_init(ndev, ntb_transport_msi_desc_changed); + if (!rc) { + mw_count -= 1; + nt->use_msi = true; + } + } + + spad_count = ntb_spad_count(ndev); + + /* Limit the MW's based on the availability of scratchpads */ + + if (spad_count < NTB_TRANSPORT_MIN_SPADS) { + nt->mw_count = 0; + rc = -EINVAL; + goto err; + } + + max_mw_count_for_spads = (spad_count - MW0_SZ_HIGH) / 2; + nt->mw_count = min(mw_count, max_mw_count_for_spads); - nt->ndev = ntb_register_transport(pdev, nt); - if (!nt->ndev) { - rc = -EIO; + nt->msi_spad_offset = nt->mw_count * 2 + MW0_SZ_HIGH; + + nt->mw_vec = kcalloc_node(mw_count, sizeof(*nt->mw_vec), + GFP_KERNEL, node); + if (!nt->mw_vec) { + rc = -ENOMEM; goto err; } - nt->max_qps = min(nt->ndev->max_cbs, max_num_clients); + for (i = 0; i < mw_count; i++) { + mw = &nt->mw_vec[i]; + + rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, + &mw->phys_size); + if (rc) + goto err1; + + mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size); + if (!mw->vbase) { + rc = -ENOMEM; + goto err1; + } + + mw->buff_size = 0; + mw->xlat_size = 0; + mw->virt_addr = NULL; + mw->dma_addr = 0; + } + + qp_bitmap = ntb_db_valid_mask(ndev); + + qp_count = ilog2(qp_bitmap); + if (nt->use_msi) { + qp_count -= 1; + nt->msi_db_mask = BIT_ULL(qp_count); + ntb_db_clear_mask(ndev, nt->msi_db_mask); + } + + if (max_num_clients && max_num_clients < qp_count) + qp_count = max_num_clients; + else if (nt->mw_count < qp_count) + qp_count = nt->mw_count; - nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp), - GFP_KERNEL); - if (!nt->qps) { + qp_bitmap &= BIT_ULL(qp_count) - 1; + + nt->qp_count = qp_count; + nt->qp_bitmap = qp_bitmap; + nt->qp_bitmap_free = qp_bitmap; + + nt->qp_vec = kcalloc_node(qp_count, sizeof(*nt->qp_vec), + GFP_KERNEL, node); + if (!nt->qp_vec) { rc = -ENOMEM; goto err1; } - nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1; + if (nt_debugfs_dir) { + nt->debugfs_node_dir = + debugfs_create_dir(pci_name(ndev->pdev), + nt_debugfs_dir); + } - for (i = 0; i < nt->max_qps; i++) - ntb_transport_init_queue(nt, i); + for (i = 0; i < qp_count; i++) { + rc = ntb_transport_init_queue(nt, i); + if (rc) + goto err2; + } INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work); - INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup); + INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work); - rc = ntb_register_event_callback(nt->ndev, - ntb_transport_event_callback); + rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops); if (rc) goto err2; @@ -895,66 +1406,238 @@ int ntb_transport_init(struct pci_dev *pdev) if (rc) goto err3; - if (ntb_hw_link_status(nt->ndev)) - schedule_delayed_work(&nt->link_work, 0); + nt->link_is_up = false; + ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); + ntb_link_event(ndev); return 0; err3: - ntb_unregister_event_callback(nt->ndev); + ntb_clear_ctx(ndev); err2: - kfree(nt->qps); + kfree(nt->qp_vec); err1: - ntb_unregister_transport(nt->ndev); + while (i--) { + mw = &nt->mw_vec[i]; + iounmap(mw->vbase); + } + kfree(nt->mw_vec); err: - debugfs_remove_recursive(nt->debugfs_dir); kfree(nt); return rc; } -void ntb_transport_free(void *transport) +static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev) { - struct ntb_transport *nt = transport; - struct pci_dev *pdev; + struct ntb_transport_ctx *nt = ndev->ctx; + struct ntb_transport_qp *qp; + u64 qp_bitmap_alloc; int i; - nt->transport_link = NTB_LINK_DOWN; + ntb_transport_link_cleanup(nt); + cancel_work_sync(&nt->link_cleanup); + 
cancel_delayed_work_sync(&nt->link_work); + + qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free; /* verify that all the qp's are freed */ - for (i = 0; i < nt->max_qps; i++) - if (!test_bit(i, &nt->qp_bitmap)) - ntb_transport_free_queue(&nt->qps[i]); + for (i = 0; i < nt->qp_count; i++) { + qp = &nt->qp_vec[i]; + if (qp_bitmap_alloc & BIT_ULL(i)) + ntb_transport_free_queue(qp); + debugfs_remove_recursive(qp->debugfs_dir); + } + + ntb_link_disable(ndev); + ntb_clear_ctx(ndev); ntb_bus_remove(nt); - cancel_delayed_work_sync(&nt->link_work); + for (i = nt->mw_count; i--; ) { + ntb_free_mw(nt, i); + iounmap(nt->mw_vec[i].vbase); + } - debugfs_remove_recursive(nt->debugfs_dir); + kfree(nt->qp_vec); + kfree(nt->mw_vec); + kfree(nt); +} - ntb_unregister_event_callback(nt->ndev); +static void ntb_complete_rxc(struct ntb_transport_qp *qp) +{ + struct ntb_queue_entry *entry; + void *cb_data; + unsigned int len; + unsigned long irqflags; - pdev = ntb_query_pdev(nt->ndev); + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); - for (i = 0; i < NTB_NUM_MW; i++) - ntb_free_mw(nt, i); + while (!list_empty(&qp->rx_post_q)) { + entry = list_first_entry(&qp->rx_post_q, + struct ntb_queue_entry, entry); + if (!(entry->flags & DESC_DONE_FLAG)) + break; - kfree(nt->qps); - ntb_unregister_transport(nt->ndev); - kfree(nt); + entry->rx_hdr->flags = 0; + iowrite32(entry->rx_index, &qp->rx_info->entry); + + cb_data = entry->cb_data; + len = entry->len; + + list_move_tail(&entry->entry, &qp->rx_free_q); + + spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); + + if (qp->rx_handler && qp->client_ready) + qp->rx_handler(qp, qp->cb_data, cb_data, len); + + spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags); + } + + spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags); } -static void ntb_rx_copy_task(struct ntb_transport_qp *qp, - struct ntb_queue_entry *entry, void *offset) +static void ntb_rx_copy_callback(void *data, + const struct dmaengine_result *res) { - void *cb_data = entry->cb_data; - unsigned int len = entry->len; + struct ntb_queue_entry *entry = data; + + /* we need to check DMA results if we are using DMA */ + if (res) { + enum dmaengine_tx_result dma_err = res->result; + + switch (dma_err) { + case DMA_TRANS_READ_FAILED: + case DMA_TRANS_WRITE_FAILED: + entry->errors++; + fallthrough; + case DMA_TRANS_ABORTED: + { + struct ntb_transport_qp *qp = entry->qp; + void *offset = qp->rx_buff + qp->rx_max_frame * + qp->rx_index; + + ntb_memcpy_rx(entry, offset); + qp->rx_memcpy++; + return; + } - memcpy(entry->buf, offset, entry->len); + case DMA_TRANS_NOERROR: + default: + break; + } + } + + entry->flags |= DESC_DONE_FLAG; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); + ntb_complete_rxc(entry->qp); +} + +static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) +{ + void *buf = entry->buf; + size_t len = entry->len; + + memcpy(buf, offset, len); + + /* Ensure that the data is fully copied out before clearing the flag */ + wmb(); - if (qp->rx_handler && qp->client_ready == NTB_LINK_UP) - qp->rx_handler(qp, qp->cb_data, cb_data, len); + ntb_rx_copy_callback(entry, NULL); +} + +static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset) +{ + struct dma_async_tx_descriptor *txd; + struct ntb_transport_qp *qp = entry->qp; + struct dma_chan *chan = qp->rx_dma_chan; + struct dma_device *device; + size_t pay_off, buff_off, len; + struct dmaengine_unmap_data *unmap; + dma_cookie_t cookie; + void *buf = entry->buf; + + len = entry->len; + device = chan->device; + 
pay_off = (size_t)offset & ~PAGE_MASK;
+	buff_off = (size_t)buf & ~PAGE_MASK;
+
+	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
+		goto err;
+
+	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
+	if (!unmap)
+		goto err;
+
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
+				      pay_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[1]))
+		goto err_get_unmap;
+
+	unmap->from_cnt = 1;
+
+	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+					     unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
+		goto err_get_unmap;
+
+	txd->callback_result = ntb_rx_copy_callback;
+	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
+
+	cookie = dmaengine_submit(txd);
+	if (dma_submit_error(cookie))
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
+
+	qp->last_cookie = cookie;
+
+	qp->rx_async++;
+
+	return 0;
+
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err:
+	return -ENXIO;
+}
+
+static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
+{
+	struct ntb_transport_qp *qp = entry->qp;
+	struct dma_chan *chan = qp->rx_dma_chan;
+	int res;
+
+	if (!chan)
+		goto err;
+
+	if (entry->len < copy_bytes)
+		goto err;
+
+	res = ntb_async_rx_submit(entry, offset);
+	if (res < 0)
+		goto err;
+
+	if (!entry->retries)
+		qp->rx_async++;
+
+	return;
+
+err:
+	ntb_memcpy_rx(entry, offset);
+	qp->rx_memcpy++;
 }
 
 static int ntb_process_rxc(struct ntb_transport_qp *qp)
@@ -966,65 +1649,62 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
 	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
 	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
-	if (!entry) {
-		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-			"no buffer - HDR ver %u, len %d, flags %x\n",
-			hdr->ver, hdr->len, hdr->flags);
-		qp->rx_err_no_buf++;
-		return -ENOMEM;
-	}
+	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
+		qp->qp_num, hdr->ver, hdr->len, hdr->flags);
 
 	if (!(hdr->flags & DESC_DONE_FLAG)) {
-		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-			     &qp->rx_pend_q);
+		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
 		qp->rx_ring_empty++;
 		return -EAGAIN;
 	}
 
-	if (hdr->ver != (u32) qp->rx_pkts) {
-		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-			"qp %d: version mismatch, expected %llu - got %u\n",
-			qp->qp_num, qp->rx_pkts, hdr->ver);
-		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-			     &qp->rx_pend_q);
+	if (hdr->flags & LINK_DOWN_FLAG) {
+		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
+		ntb_qp_link_down(qp);
+		hdr->flags = 0;
+		return -EAGAIN;
+	}
+
+	if (hdr->ver != (u32)qp->rx_pkts) {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"version mismatch, expected %llu - got %u\n",
+			qp->rx_pkts, hdr->ver);
 		qp->rx_err_ver++;
 		return -EIO;
 	}
 
-	if (hdr->flags & LINK_DOWN_FLAG) {
-		ntb_qp_link_down(qp);
-
-		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-			     &qp->rx_pend_q);
-		goto out;
+	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
+	if (!entry) {
+		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
+		qp->rx_err_no_buf++;
+		return -EAGAIN;
 	}
 
-	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-		"rx offset %u, ver %u - %d payload received, buf size %d\n",
-		qp->rx_index, hdr->ver, hdr->len, entry->len);
+	entry->rx_hdr = hdr;
+	entry->rx_index = qp->rx_index;
 
-	if (hdr->len <= entry->len) {
-		entry->len = hdr->len;
-		ntb_rx_copy_task(qp, entry, offset);
+	if (hdr->len > entry->len) {
+		dev_dbg(&qp->ndev->pdev->dev,
+			"receive buffer overflow! Wanted %d got %d\n",
+			hdr->len, entry->len);
+		qp->rx_err_oflow++;
+
+		entry->len = -EIO;
+		entry->flags |= DESC_DONE_FLAG;
+
+		ntb_complete_rxc(qp);
 	} else {
-		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-			     &qp->rx_pend_q);
+		dev_dbg(&qp->ndev->pdev->dev,
+			"RX OK index %u ver %u size %d into buf size %d\n",
+			qp->rx_index, hdr->ver, hdr->len, entry->len);
 
-		qp->rx_err_oflow++;
-		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
-			"RX overflow! Wanted %d got %d\n",
-			hdr->len, entry->len);
-	}
+		qp->rx_bytes += hdr->len;
+		qp->rx_pkts++;
 
-	qp->rx_bytes += hdr->len;
-	qp->rx_pkts++;
+		entry->len = hdr->len;
 
-out:
-	/* Ensure that the data is fully copied out before clearing the flag */
-	wmb();
-	hdr->flags = 0;
-	iowrite32(qp->rx_index, &qp->rx_info->entry);
+		ntb_async_rx(entry, offset);
+	}
 
 	qp->rx_index++;
 	qp->rx_index %= qp->rx_max_entry;
@@ -1032,11 +1712,14 @@ out:
 	return 0;
 }
 
-static void ntb_transport_rx(unsigned long data)
+static void ntb_transport_rxc_db(unsigned long data)
 {
-	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+	struct ntb_transport_qp *qp = (void *)data;
 	int rc, i;
 
+	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
+		__func__, qp->qp_num);
+
 	/* Limit the number of packets processed in a single interrupt to
 	 * provide fairness to others
 	 */
@@ -1045,35 +1728,69 @@ static void ntb_transport_rx(unsigned long data)
 		if (rc)
 			break;
 	}
-}
 
-static void ntb_transport_rxc_db(void *data, int db_num)
-{
-	struct ntb_transport_qp *qp = data;
-
-	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
-		__func__, db_num);
-
-	tasklet_schedule(&qp->rx_work);
+	if (i && qp->rx_dma_chan)
+		dma_async_issue_pending(qp->rx_dma_chan);
+
+	if (i == qp->rx_max_entry) {
+		/* there is more work to do */
+		if (qp->active)
+			tasklet_schedule(&qp->rxc_db_work);
+	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
+		/* the doorbell bit is set: clear it */
+		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
+		/* ntb_db_read ensures ntb_db_clear write is committed */
+		ntb_db_read(qp->ndev);
+
+		/* an interrupt may have arrived between finishing
+		 * ntb_process_rxc and clearing the doorbell bit:
+		 * there might be some more work to do.
+		 */
+		if (qp->active)
+			tasklet_schedule(&qp->rxc_db_work);
+	}
 }
 
-static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
-			     struct ntb_queue_entry *entry,
-			     void __iomem *offset)
+static void ntb_tx_copy_callback(void *data,
+				 const struct dmaengine_result *res)
 {
-	struct ntb_payload_header __iomem *hdr;
-
-	memcpy_toio(offset, entry->buf, entry->len);
+	struct ntb_queue_entry *entry = data;
+	struct ntb_transport_qp *qp = entry->qp;
+	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
+
+	/* we need to check DMA results if we are using DMA */
+	if (res) {
+		enum dmaengine_tx_result dma_err = res->result;
+
+		switch (dma_err) {
+		case DMA_TRANS_READ_FAILED:
+		case DMA_TRANS_WRITE_FAILED:
+			entry->errors++;
+			fallthrough;
+		case DMA_TRANS_ABORTED:
+		{
+			void __iomem *offset =
+				qp->tx_mw + qp->tx_max_frame *
+				entry->tx_index;
+
+			/* resubmit via CPU */
+			ntb_memcpy_tx(entry, offset);
+			qp->tx_memcpy++;
+			return;
+		}
 
-	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
-	iowrite32(entry->len, &hdr->len);
-	iowrite32((u32) qp->tx_pkts, &hdr->ver);
+		case DMA_TRANS_NOERROR:
+		default:
+			break;
+		}
+	}
 
-	/* Ensure that the data is fully copied out before setting the flag */
-	wmb();
 	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
 
-	ntb_ring_sdb(qp->ndev, qp->qp_num);
+	if (qp->use_msi)
+		ntb_msi_peer_trigger(qp->ndev, PIDX, &qp->peer_msi_desc);
+	else
+		ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));
 
 	/* The entry length can only be zero if the packet is intended to be a
 	 * "link down" or similar.  Since no payload is being sent in these
@@ -1090,31 +1807,137 @@ static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
 	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
 }
 
-static int ntb_process_tx(struct ntb_transport_qp *qp,
-			  struct ntb_queue_entry *entry)
+static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
+{
+#ifdef ARCH_HAS_NOCACHE_UACCESS
+	/*
+	 * Using non-temporal mov to improve performance on non-cached
+	 * writes, even though we aren't actually copying from user space.
+	 */
+	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
+#else
+	memcpy_toio(offset, entry->buf, entry->len);
+#endif
+
+	/* Ensure that the data is fully copied out before setting the flags */
+	wmb();
+
+	ntb_tx_copy_callback(entry, NULL);
+}
+
+static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
+			       struct ntb_queue_entry *entry)
 {
+	struct dma_async_tx_descriptor *txd;
+	struct dma_chan *chan = qp->tx_dma_chan;
+	struct dma_device *device;
+	size_t len = entry->len;
+	void *buf = entry->buf;
+	size_t dest_off, buff_off;
+	struct dmaengine_unmap_data *unmap;
+	dma_addr_t dest;
+	dma_cookie_t cookie;
+
+	device = chan->device;
+	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
+	buff_off = (size_t)buf & ~PAGE_MASK;
+	dest_off = (size_t)dest & ~PAGE_MASK;
+
+	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
+		goto err;
+
+	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
+	if (!unmap)
+		goto err;
+
+	unmap->len = len;
+	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
+				      buff_off, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(device->dev, unmap->addr[0]))
+		goto err_get_unmap;
+
+	unmap->to_cnt = 1;
+
+	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+					     DMA_PREP_INTERRUPT);
+	if (!txd)
+		goto err_get_unmap;
+
+	txd->callback_result = ntb_tx_copy_callback;
+	txd->callback_param = entry;
+	dma_set_unmap(txd, unmap);
+
+	cookie = dmaengine_submit(txd);
+	if (dma_submit_error(cookie))
+		goto err_set_unmap;
+
+	dmaengine_unmap_put(unmap);
+
+	dma_async_issue_pending(chan);
+
+	return 0;
+err_set_unmap:
+	dmaengine_unmap_put(unmap);
+err_get_unmap:
+	dmaengine_unmap_put(unmap);
+err:
+	return -ENXIO;
+}
+
+static void ntb_async_tx(struct ntb_transport_qp *qp,
+			 struct ntb_queue_entry *entry)
+{
+	struct ntb_payload_header __iomem *hdr;
+	struct dma_chan *chan = qp->tx_dma_chan;
 	void __iomem *offset;
+	int res;
+
+	entry->tx_index = qp->tx_index;
+	offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
+	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	entry->tx_hdr = hdr;
+
+	iowrite32(entry->len, &hdr->len);
+	iowrite32((u32)qp->tx_pkts, &hdr->ver);
+
+	if (!chan)
+		goto err;
 
-	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
+	if (entry->len < copy_bytes)
+		goto err;
+
+	res = ntb_async_tx_submit(qp, entry);
+	if (res < 0)
+		goto err;
+
+	if (!entry->retries)
+		qp->tx_async++;
+
+	return;
+
+err:
+	ntb_memcpy_tx(entry, offset);
+	qp->tx_memcpy++;
+}
 
-	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
-		qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
-		entry->buf);
-	if (qp->tx_index == qp->remote_rx_info->entry) {
+static int ntb_process_tx(struct ntb_transport_qp *qp,
+			  struct ntb_queue_entry *entry)
+{
+	if (!ntb_transport_tx_free_entry(qp)) {
 		qp->tx_ring_full++;
 		return -EAGAIN;
 	}
 
 	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
 		if (qp->tx_handler)
-			qp->tx_handler(qp->cb_data, qp, NULL, -EIO);
+			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
 
 		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
 			     &qp->tx_free_q);
 		return 0;
 	}
 
-	ntb_tx_copy_task(qp, entry, offset);
+	ntb_async_tx(qp, entry);
 
 	qp->tx_index++;
 	qp->tx_index %= qp->tx_max_entry;
@@ -1126,15 +1949,14 @@ static int ntb_process_tx(struct ntb_transport_qp *qp,
 
 static void ntb_send_link_down(struct ntb_transport_qp *qp)
 {
-	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+	struct pci_dev *pdev = qp->ndev->pdev;
 	struct ntb_queue_entry *entry;
 	int i, rc;
 
-	if (qp->qp_link == NTB_LINK_DOWN)
+	if (!qp->link_is_up)
 		return;
 
-	qp->qp_link = NTB_LINK_DOWN;
-	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);
 
 	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
 		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
@@ -1155,13 +1977,20 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
 	if (rc)
 		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
 			qp->qp_num);
+
+	ntb_qp_link_down_reset(qp);
+}
+
+static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
+{
+	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
 }
 
 /**
  * ntb_transport_create_queue - Create a new NTB transport layer queue
- * @rx_handler: receive callback function
- * @tx_handler: transmit callback function
- * @event_handler: event callback function
+ * @data: pointer for callback data
+ * @client_dev: &struct device pointer
+ * @handlers: pointer to various ntb queue (callback) handlers
  *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
@@ -1172,72 +2001,126 @@ static void ntb_send_link_down(struct ntb_transport_qp *qp)
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
 struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+ntb_transport_create_queue(void *data, struct device *client_dev,
 			   const struct ntb_queue_handlers *handlers)
 {
+	struct ntb_dev *ndev;
+	struct pci_dev *pdev;
+	struct ntb_transport_ctx *nt;
 	struct ntb_queue_entry *entry;
 	struct ntb_transport_qp *qp;
-	struct ntb_transport *nt;
+	u64 qp_bit;
 	unsigned int free_queue;
-	int rc, i;
+	dma_cap_mask_t dma_mask;
+	int node;
+	int i;
 
-	nt = ntb_find_transport(pdev);
-	if (!nt)
-		goto err;
+	ndev = dev_ntb(client_dev->parent);
+	pdev = ndev->pdev;
+	nt = ndev->ctx;
 
-	free_queue = ffs(nt->qp_bitmap);
+	node = dev_to_node(&ndev->dev);
+
+	free_queue = ffs(nt->qp_bitmap_free);
 	if (!free_queue)
 		goto err;
 
 	/* decrement free_queue to make it zero based */
 	free_queue--;
 
-	clear_bit(free_queue, &nt->qp_bitmap);
+	qp = &nt->qp_vec[free_queue];
+	qp_bit = BIT_ULL(qp->qp_num);
+
+	nt->qp_bitmap_free &= ~qp_bit;
 
-	qp = &nt->qps[free_queue];
 	qp->cb_data = data;
 	qp->rx_handler = handlers->rx_handler;
 	qp->tx_handler = handlers->tx_handler;
 	qp->event_handler = handlers->event_handler;
 
+	dma_cap_zero(dma_mask);
+	dma_cap_set(DMA_MEMCPY, dma_mask);
+
+	if (use_dma) {
+		qp->tx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->tx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");
+
+		qp->rx_dma_chan =
+			dma_request_channel(dma_mask, ntb_dma_filter_fn,
+					    (void *)(unsigned long)node);
+		if (!qp->rx_dma_chan)
+			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
+	} else {
+		qp->tx_dma_chan = NULL;
+		qp->rx_dma_chan = NULL;
+	}
+
+	qp->tx_mw_dma_addr = 0;
+	if (qp->tx_dma_chan) {
+		qp->tx_mw_dma_addr =
+			dma_map_resource(qp->tx_dma_chan->device->dev,
+					 qp->tx_mw_phys, qp->tx_mw_size,
+					 DMA_FROM_DEVICE, 0);
+		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
+				      qp->tx_mw_dma_addr)) {
+			qp->tx_mw_dma_addr = 0;
+			goto err1;
+		}
+	}
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
+		qp->tx_dma_chan ? "DMA" : "CPU");
+
+	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
+		qp->rx_dma_chan ? "DMA" : "CPU");
"DMA" : "CPU"); + for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { - entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); + entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); if (!entry) goto err1; - ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, + entry->qp = qp; + ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); } + qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES; - for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { - entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC); + for (i = 0; i < qp->tx_max_entry; i++) { + entry = kzalloc_node(sizeof(*entry), GFP_KERNEL, node); if (!entry) goto err2; + entry->qp = qp; ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); } - tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp); - - rc = ntb_register_db_callback(qp->ndev, free_queue, qp, - ntb_transport_rxc_db); - if (rc) - goto err3; + ntb_db_clear(qp->ndev, qp_bit); + ntb_db_clear_mask(qp->ndev, qp_bit); dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num); return qp; -err3: - tasklet_disable(&qp->rx_work); err2: while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) kfree(entry); err1: - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + qp->rx_alloc_entry = 0; + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); - set_bit(free_queue, &nt->qp_bitmap); + if (qp->tx_mw_dma_addr) + dma_unmap_resource(qp->tx_dma_chan->device->dev, + qp->tx_mw_dma_addr, qp->tx_mw_size, + DMA_FROM_DEVICE, 0); + if (qp->tx_dma_chan) + dma_release_channel(qp->tx_dma_chan); + if (qp->rx_dma_chan) + dma_release_channel(qp->rx_dma_chan); + nt->qp_bitmap_free |= qp_bit; err: return NULL; } @@ -1253,29 +2136,79 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp) { struct pci_dev *pdev; struct ntb_queue_entry *entry; + u64 qp_bit; if (!qp) return; - pdev = ntb_query_pdev(qp->ndev); + pdev = qp->ndev->pdev; + + qp->active = false; + + if (qp->tx_dma_chan) { + struct dma_chan *chan = qp->tx_dma_chan; + /* Putting the dma_chan to NULL will force any new traffic to be + * processed by the CPU instead of the DAM engine + */ + qp->tx_dma_chan = NULL; + + /* Try to be nice and wait for any queued DMA engine + * transactions to process before smashing it with a rock + */ + dma_sync_wait(chan, qp->last_cookie); + dmaengine_terminate_all(chan); + + dma_unmap_resource(chan->device->dev, + qp->tx_mw_dma_addr, qp->tx_mw_size, + DMA_FROM_DEVICE, 0); + + dma_release_channel(chan); + } + + if (qp->rx_dma_chan) { + struct dma_chan *chan = qp->rx_dma_chan; + /* Putting the dma_chan to NULL will force any new traffic to be + * processed by the CPU instead of the DAM engine + */ + qp->rx_dma_chan = NULL; + + /* Try to be nice and wait for any queued DMA engine + * transactions to process before smashing it with a rock + */ + dma_sync_wait(chan, qp->last_cookie); + dmaengine_terminate_all(chan); + dma_release_channel(chan); + } + + qp_bit = BIT_ULL(qp->qp_num); + + ntb_db_set_mask(qp->ndev, qp_bit); + tasklet_kill(&qp->rxc_db_work); cancel_delayed_work_sync(&qp->link_work); - ntb_unregister_db_callback(qp->ndev, qp->qp_num); - tasklet_disable(&qp->rx_work); + qp->cb_data = NULL; + qp->rx_handler = NULL; + qp->tx_handler = NULL; + qp->event_handler = NULL; - while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) + while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) kfree(entry); - while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { - 
-		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
+		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
 		kfree(entry);
 	}
 
 	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
 		kfree(entry);
 
-	set_bit(qp->qp_num, &qp->transport->qp_bitmap);
+	qp->transport->qp_bitmap_free |= qp_bit;
 
 	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
 }
@@ -1296,17 +2229,17 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
 	struct ntb_queue_entry *entry;
 	void *buf;
 
-	if (!qp || qp->client_ready == NTB_LINK_UP)
+	if (!qp || qp->client_ready)
 		return NULL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
 	if (!entry)
 		return NULL;
 
 	buf = entry->cb_data;
 	*len = entry->len;
 
-	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
 
 	return buf;
 }
@@ -1332,15 +2265,22 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	if (!qp)
 		return -EINVAL;
 
-	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cb_data = cb;
 	entry->buf = data;
 	entry->len = len;
+	entry->flags = 0;
+	entry->retries = 0;
+	entry->errors = 0;
+	entry->rx_index = 0;
+
+	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+	if (qp->active)
+		tasklet_schedule(&qp->rxc_db_work);
 
 	return 0;
 }
@@ -1354,7 +2294,7 @@ EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
- * payload will be transmitted. This assumes that a lock is behing held to
+ * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
@@ -1365,17 +2305,26 @@ int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 	struct ntb_queue_entry *entry;
 	int rc;
 
-	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
+	if (!qp || !len)
 		return -EINVAL;
 
+	/* If the qp link is down already, just ignore. */
+	if (!qp->link_is_up)
+		return 0;
+
 	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
-	if (!entry)
-		return -ENOMEM;
+	if (!entry) {
+		qp->tx_err_no_buf++;
+		return -EBUSY;
+	}
 
 	entry->cb_data = cb;
 	entry->buf = data;
 	entry->len = len;
 	entry->flags = 0;
+	entry->errors = 0;
+	entry->retries = 0;
+	entry->tx_index = 0;
 
 	rc = ntb_process_tx(qp, entry);
 	if (rc)
@@ -1397,9 +2346,9 @@ void ntb_transport_link_up(struct ntb_transport_qp *qp)
 	if (!qp)
 		return;
 
-	qp->client_ready = NTB_LINK_UP;
+	qp->client_ready = true;
 
-	if (qp->transport->transport_link == NTB_LINK_UP)
+	if (qp->transport->link_is_up)
 		schedule_delayed_work(&qp->link_work, 0);
 }
 EXPORT_SYMBOL_GPL(ntb_transport_link_up);
@@ -1410,32 +2359,22 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
- * entries on queue are purged or otherwise handled appropraitely.
+ * entries on queue are purged or otherwise handled appropriately.
 */
 void ntb_transport_link_down(struct ntb_transport_qp *qp)
 {
-	struct pci_dev *pdev;
-	int rc, val;
+	int val;
 
 	if (!qp)
 		return;
 
-	pdev = ntb_query_pdev(qp->ndev);
-	qp->client_ready = NTB_LINK_DOWN;
+	qp->client_ready = false;
 
-	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
-	if (rc) {
-		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
-		return;
-	}
+	val = ntb_spad_read(qp->ndev, QP_LINKS);
 
-	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
-				   val & ~(1 << qp->qp_num));
-	if (rc)
-		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
-			val & ~(1 << qp->qp_num), QP_LINKS);
+	ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num));
 
-	if (qp->qp_link == NTB_LINK_UP)
+	if (qp->link_is_up)
 		ntb_send_link_down(qp);
 	else
 		cancel_delayed_work_sync(&qp->link_work);
@@ -1455,7 +2394,7 @@ bool ntb_transport_link_query(struct ntb_transport_qp *qp)
 	if (!qp)
 		return false;
 
-	return qp->qp_link == NTB_LINK_UP;
+	return qp->link_is_up;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_link_query);
 
@@ -1486,9 +2425,105 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
 */
 unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
 {
+	unsigned int max_size;
+	unsigned int copy_align;
+	struct dma_chan *rx_chan, *tx_chan;
+
 	if (!qp)
 		return 0;
 
-	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	rx_chan = qp->rx_dma_chan;
+	tx_chan = qp->tx_dma_chan;
+
+	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
+			 tx_chan ? tx_chan->device->copy_align : 0);
+
+	/* If DMA engine usage is possible, try to find the max size for that */
+	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	max_size = round_down(max_size, 1 << copy_align);
+
+	return max_size;
 }
 EXPORT_SYMBOL_GPL(ntb_transport_max_size);
+
+unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
+{
+	unsigned int head = qp->tx_index;
+	unsigned int tail = qp->remote_rx_info->entry;
+
+	return tail >= head ? tail - head : qp->tx_max_entry + tail - head;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
+
+static void ntb_transport_doorbell_callback(void *data, int vector)
+{
+	struct ntb_transport_ctx *nt = data;
+	struct ntb_transport_qp *qp;
+	u64 db_bits;
+	unsigned int qp_num;
+
+	if (ntb_db_read(nt->ndev) & nt->msi_db_mask) {
+		ntb_transport_msi_peer_desc_changed(nt);
+		ntb_db_clear(nt->ndev, nt->msi_db_mask);
+	}
+
+	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
+		   ntb_db_vector_mask(nt->ndev, vector));
+
+	while (db_bits) {
+		qp_num = __ffs(db_bits);
+		qp = &nt->qp_vec[qp_num];
+
+		if (qp->active)
+			tasklet_schedule(&qp->rxc_db_work);
+
+		db_bits &= ~BIT_ULL(qp_num);
+	}
+}
+
+static const struct ntb_ctx_ops ntb_transport_ops = {
+	.link_event = ntb_transport_event_callback,
+	.db_event = ntb_transport_doorbell_callback,
+};
+
+static struct ntb_client ntb_transport_client = {
+	.ops = {
+		.probe = ntb_transport_probe,
+		.remove = ntb_transport_free,
+	},
+};
+
+static int __init ntb_transport_init(void)
+{
+	int rc;
+
+	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);
+
+	if (debugfs_initialized())
+		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	rc = bus_register(&ntb_transport_bus);
+	if (rc)
+		goto err_bus;
+
+	rc = ntb_register_client(&ntb_transport_client);
+	if (rc)
+		goto err_client;
+
+	return 0;
+
+err_client:
+	bus_unregister(&ntb_transport_bus);
+err_bus:
+	debugfs_remove_recursive(nt_debugfs_dir);
+	return rc;
+}
+module_init(ntb_transport_init);
+
+static void __exit ntb_transport_exit(void)
+{
+	ntb_unregister_client(&ntb_transport_client);
+	bus_unregister(&ntb_transport_bus);
+	debugfs_remove_recursive(nt_debugfs_dir);
+}
+module_exit(ntb_transport_exit);
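
For readers following the queue API changes in this diff, a minimal client sketch may help. Everything below is illustrative only and is not part of the patch: the handler signatures are inferred from the call sites above (e.g. qp->tx_handler(qp, qp->cb_data, NULL, -EIO)), and names such as demo_rx_handler, demo_open_qp and DEMO_RX_BUF_LEN are hypothetical.

/* Hypothetical client sketch, assuming <linux/ntb_transport.h> exports the
 * handler layout implied by the call sites in this diff.
 */
#include <linux/module.h>
#include <linux/ntb_transport.h>

#define DEMO_RX_BUF_LEN	2048	/* hypothetical receive buffer size */

static void demo_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len)
{
	/* data is a buffer previously posted with ntb_transport_rx_enqueue();
	 * a negative len (e.g. -EIO on receive overflow) signals an error
	 */
	if (len >= 0)
		ntb_transport_rx_enqueue(qp, data, data, DEMO_RX_BUF_LEN);
}

static void demo_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len)
{
	/* called once the payload has been copied (by CPU or DMA) into the
	 * peer memory window; the buffer may be recycled here
	 */
}

static void demo_event_handler(void *qp_data, int status)
{
	/* qp link went up or down */
}

static const struct ntb_queue_handlers demo_handlers = {
	.rx_handler	= demo_rx_handler,
	.tx_handler	= demo_tx_handler,
	.event_handler	= demo_event_handler,
};

/* called from a client's probe; client_dev is the device handed to the
 * client by the ntb_transport bus
 */
static int demo_open_qp(struct device *client_dev, void *priv, void *buf)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(priv, client_dev, &demo_handlers);
	if (!qp)
		return -EINVAL;

	/* post receive buffers before declaring the client ready */
	ntb_transport_rx_enqueue(qp, buf, buf, DEMO_RX_BUF_LEN);
	ntb_transport_link_up(qp);

	return 0;
}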

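The ring accounting in ntb_transport_tx_free_entry() and the alignment rounding in ntb_transport_max_size() are easy to check by hand. The numbers below are illustrative assumptions (frame size, header size, copy_align), not values taken from this patch:

/*
 * ntb_transport_tx_free_entry(), assuming tx_max_entry = 8,
 * head = qp->tx_index, tail = qp->remote_rx_info->entry:
 *
 *   head = 2, tail = 5  ->  tail >= head:  5 - 2     = 3 free entries
 *   head = 5, tail = 2  ->  tail <  head:  8 + 2 - 5 = 5 free entries
 *
 * ntb_transport_max_size(), assuming tx_max_frame = 65536, a 12-byte
 * struct ntb_payload_header and copy_align = 6 (64-byte alignment):
 *
 *   max_size = 65536 - 12     = 65524
 *   round_down(65524, 1 << 6) = 65472 bytes per packet
 */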