 drivers/thunderbolt/xdomain.c | 1525 ++++++++++++++++++++++++++++++++++------
 1 file changed, 1257 insertions(+), 268 deletions(-)
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index e27dd8beb94b..63c7be818b2c 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -8,18 +8,49 @@
*/
#include <linux/device.h>
+#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <linux/prandom.h>
+#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
#include "tb.h"
-#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */
-#define XDOMAIN_PROPERTIES_RETRIES 60
-#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
+#define XDOMAIN_SHORT_TIMEOUT 100 /* ms */
+#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
+#define XDOMAIN_BONDING_TIMEOUT 10000 /* ms */
+#define XDOMAIN_RETRIES 10
+#define XDOMAIN_DEFAULT_MAX_HOPID 15
+
+enum {
+ XDOMAIN_STATE_INIT,
+ XDOMAIN_STATE_UUID,
+ XDOMAIN_STATE_LINK_STATUS,
+ XDOMAIN_STATE_LINK_STATE_CHANGE,
+ XDOMAIN_STATE_LINK_STATUS2,
+ XDOMAIN_STATE_BONDING_UUID_LOW,
+ XDOMAIN_STATE_BONDING_UUID_HIGH,
+ XDOMAIN_STATE_PROPERTIES,
+ XDOMAIN_STATE_ENUMERATED,
+ XDOMAIN_STATE_ERROR,
+};
+
+static const char * const state_names[] = {
+ [XDOMAIN_STATE_INIT] = "INIT",
+ [XDOMAIN_STATE_UUID] = "UUID",
+ [XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
+ [XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
+ [XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
+ [XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
+ [XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
+ [XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
+ [XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
+ [XDOMAIN_STATE_ERROR] = "ERROR",
+};
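These states back the new per-XDomain discovery state machine (tb_xdomain_state_work() below); each step is retried a bounded number of times, replacing the fixed retry counts that the removed XDOMAIN_PROPERTIES_* constants provided.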
struct xdomain_request_work {
struct work_struct work;
@@ -27,13 +58,19 @@ struct xdomain_request_work {
struct tb *tb;
};
-/* Serializes access to the properties and protocol handlers below */
+static bool tb_xdomain_enabled = true;
+module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
+MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
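Because the parameter is 0444 it can only be set at load time; booting with thunderbolt.xdomain=0 (or the equivalent modprobe option) keeps host-to-host connections from being established even when the ACPI policy would otherwise allow them.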
+
+/*
+ * Serializes access to the properties and protocol handlers below. If
+ * you need to take both this lock and the struct tb_xdomain lock, take
+ * this one first.
+ */
static DEFINE_MUTEX(xdomain_lock);
/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
-static u32 *xdomain_property_block;
-static u32 xdomain_property_block_len;
static u32 xdomain_property_block_gen;
/* Additional protocol handlers */
@@ -44,6 +81,11 @@ static const uuid_t tb_xdp_uuid =
UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
+bool tb_is_xdomain_enabled(void)
+{
+ return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
+}
+
static bool tb_xdomain_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
@@ -118,7 +160,7 @@ static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
* This can be used to send a XDomain response message to the other
* domain. No response for the message is expected.
*
- * Return: %0 in case of success and negative errno in case of failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
size_t size, enum tb_cfg_pkg_type type)
@@ -170,7 +212,7 @@ static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
* the other domain. The function waits until the response is received
* or when timeout triggers. Whichever comes first.
*
- * Return: %0 in case of success and negative errno in case of failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
size_t request_size, enum tb_cfg_pkg_type request_type,
@@ -198,21 +240,17 @@ static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
-static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
- const struct tb_xdp_error_response *error;
-
- if (hdr->type != ERROR_RESPONSE)
+ if (res->hdr.type != ERROR_RESPONSE)
return 0;
- error = (const struct tb_xdp_error_response *)hdr;
-
- switch (error->error) {
+ switch (res->error) {
case ERROR_UNKNOWN_PACKET:
case ERROR_UNKNOWN_DOMAIN:
return -EIO;
case ERROR_NOT_SUPPORTED:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
case ERROR_NOT_READY:
return -EAGAIN;
default:
@@ -222,6 +260,52 @@ static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
return 0;
}
+static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
+ uuid_t *uuid, u64 *remote_route)
+{
+ struct tb_xdp_uuid_response res;
+ struct tb_xdp_uuid req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
+ sizeof(req));
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.err);
+ if (ret)
+ return ret;
+
+ uuid_copy(uuid, &res.src_uuid);
+ *remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;
+
+ return 0;
+}
+
+static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+ const uuid_t *uuid)
+{
+ struct tb_xdp_uuid_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
+ sizeof(res));
+
+ uuid_copy(&res.src_uuid, uuid);
+ res.src_route_hi = upper_32_bits(route);
+ res.src_route_lo = lower_32_bits(route);
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
enum tb_xdp_error error)
{
@@ -258,7 +342,6 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
- len = 0;
data_len = 0;
do {
@@ -269,7 +352,7 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
if (ret)
goto err;
- ret = tb_xdp_handle_error(&res->hdr);
+ ret = tb_xdp_handle_error(&res->err);
if (ret)
goto err;
@@ -329,8 +412,7 @@ err:
}
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
- u64 route, u8 sequence, const uuid_t *src_uuid,
- const struct tb_xdp_properties *req)
+ struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
struct tb_xdp_properties_response *res;
size_t total_size;
@@ -342,39 +424,39 @@ static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
 * protocol supports forwarding, though, which we might add
 * support for later on.
*/
- if (!uuid_equal(src_uuid, &req->dst_uuid)) {
- tb_xdp_error_response(ctl, route, sequence,
+ if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
+ tb_xdp_error_response(ctl, xd->route, sequence,
ERROR_UNKNOWN_DOMAIN);
return 0;
}
- mutex_lock(&xdomain_lock);
+ mutex_lock(&xd->lock);
- if (req->offset >= xdomain_property_block_len) {
- mutex_unlock(&xdomain_lock);
+ if (req->offset >= xd->local_property_block_len) {
+ mutex_unlock(&xd->lock);
return -EINVAL;
}
- len = xdomain_property_block_len - req->offset;
+ len = xd->local_property_block_len - req->offset;
len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
total_size = sizeof(*res) + len * 4;
res = kzalloc(total_size, GFP_KERNEL);
if (!res) {
- mutex_unlock(&xdomain_lock);
+ mutex_unlock(&xd->lock);
return -ENOMEM;
}
- tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
+ tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
total_size);
- res->generation = xdomain_property_block_gen;
- res->data_length = xdomain_property_block_len;
+ res->generation = xd->local_property_block_gen;
+ res->data_length = xd->local_property_block_len;
res->offset = req->offset;
- uuid_copy(&res->src_uuid, src_uuid);
+ uuid_copy(&res->src_uuid, xd->local_uuid);
uuid_copy(&res->dst_uuid, &req->src_uuid);
- memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
+ memcpy(res->data, &xd->local_property_block[req->offset], len * 4);
- mutex_unlock(&xdomain_lock);
+ mutex_unlock(&xd->lock);
ret = __tb_xdomain_response(ctl, res, total_size,
TB_CFG_PKG_XDOMAIN_RESP);
@@ -403,7 +485,7 @@ static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
if (ret)
return ret;
- return tb_xdp_handle_error(&res.hdr);
+ return tb_xdp_handle_error(&res.err);
}
static int
@@ -418,6 +500,111 @@ tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
TB_CFG_PKG_XDOMAIN_RESP);
}
+static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u8 *slw, u8 *tlw,
+ u8 *sls, u8 *tls)
+{
+ struct tb_xdp_link_state_status_response res;
+ struct tb_xdp_link_state_status req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
+ sizeof(req));
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+ &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.err);
+ if (ret)
+ return ret;
+
+ if (res.status != 0)
+ return -EREMOTEIO;
+
+ *slw = res.slw;
+ *tlw = res.tlw;
+ *sls = res.sls;
+ *tls = res.tls;
+
+ return 0;
+}
+
+static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
+ struct tb_xdomain *xd, u8 sequence)
+{
+ struct tb_xdp_link_state_status_response res;
+ struct tb_port *port = tb_xdomain_downstream_port(xd);
+ u32 val[2];
+ int ret;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, xd->route, sequence,
+ LINK_STATE_STATUS_RESPONSE, sizeof(res));
+
+ ret = tb_port_read(port, val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
+ if (ret)
+ return ret;
+
+ res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+ res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
+ LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
+ res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+ res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
+ LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u8 tlw, u8 tls)
+{
+ struct tb_xdp_link_state_change_response res;
+ struct tb_xdp_link_state_change req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
+ sizeof(req));
+ req.tlw = tlw;
+ req.tls = tls;
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+ &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.err);
+ if (ret)
+ return ret;
+
+ return res.status != 0 ? -EREMOTEIO : 0;
+}
+
+static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
+ u8 sequence, u32 status)
+{
+ struct tb_xdp_link_state_change_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
+ sizeof(res));
+
+ res.status = status;
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
/**
* tb_register_protocol_handler() - Register protocol handler
* @handler: Handler to register
@@ -426,6 +613,8 @@ tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
* messages. After this function is called the service driver needs to
* be able to handle calls to callback whenever a package with the
* registered protocol is received.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
@@ -456,6 +645,87 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
+static void update_property_block(struct tb_xdomain *xd)
+{
+ mutex_lock(&xdomain_lock);
+ mutex_lock(&xd->lock);
+ /*
+ * If the local property block is not up-to-date, rebuild it now
+ * based on the global property template.
+ */
+ if (!xd->local_property_block ||
+ xd->local_property_block_gen < xdomain_property_block_gen) {
+ struct tb_property_dir *dir;
+ int ret, block_len;
+ u32 *block;
+
+ dir = tb_property_copy_dir(xdomain_property_dir);
+ if (!dir) {
+ dev_warn(&xd->dev, "failed to copy properties\n");
+ goto out_unlock;
+ }
+
+ /* Fill in non-static properties now */
+ tb_property_add_text(dir, "deviceid", utsname()->nodename);
+ tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);
+
+ ret = tb_property_format_dir(dir, NULL, 0);
+ if (ret < 0) {
+ dev_warn(&xd->dev, "local property block creation failed\n");
+ tb_property_free_dir(dir);
+ goto out_unlock;
+ }
+
+ block_len = ret;
+ block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
+ if (!block) {
+ tb_property_free_dir(dir);
+ goto out_unlock;
+ }
+
+ ret = tb_property_format_dir(dir, block, block_len);
+ if (ret) {
+ dev_warn(&xd->dev, "property block generation failed\n");
+ tb_property_free_dir(dir);
+ kfree(block);
+ goto out_unlock;
+ }
+
+ tb_property_free_dir(dir);
+ /* Release the previous block */
+ kfree(xd->local_property_block);
+ /* Assign new one */
+ xd->local_property_block = block;
+ xd->local_property_block_len = block_len;
+ xd->local_property_block_gen = xdomain_property_block_gen;
+ }
+
+out_unlock:
+ mutex_unlock(&xd->lock);
+ mutex_unlock(&xdomain_lock);
+}
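Note that the property block is now built lazily per XDomain from the global template (plus the dynamic "deviceid" and "maxhopid" entries) instead of being kept in a single global xdomain_property_block, so registering or removing a property directory only has to bump xdomain_property_block_gen.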
+
+static void start_handshake(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_INIT;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+/* Can be called from state_work */
+static void __stop_handshake(struct tb_xdomain *xd)
+{
+ cancel_delayed_work_sync(&xd->properties_changed_work);
+ xd->properties_changed_retries = 0;
+ xd->state_retries = 0;
+}
+
+static void stop_handshake(struct tb_xdomain *xd)
+{
+ cancel_delayed_work_sync(&xd->state_work);
+ __stop_handshake(xd);
+}
+
static void tb_xdp_handle_request(struct work_struct *work)
{
struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
@@ -463,6 +733,7 @@ static void tb_xdp_handle_request(struct work_struct *work)
const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
struct tb *tb = xw->tb;
struct tb_ctl *ctl = tb->ctl;
+ struct tb_xdomain *xd;
const uuid_t *uuid;
int ret = 0;
u32 sequence;
@@ -484,16 +755,22 @@ static void tb_xdp_handle_request(struct work_struct *work)
goto out;
}
+ xd = tb_xdomain_find_by_route_locked(tb, route);
+ if (xd)
+ update_property_block(xd);
+
switch (pkg->type) {
case PROPERTIES_REQUEST:
- ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
- (const struct tb_xdp_properties *)pkg);
+ tb_dbg(tb, "%llx: received XDomain properties request\n", route);
+ if (xd) {
+ ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
+ (const struct tb_xdp_properties *)pkg);
+ }
break;
- case PROPERTIES_CHANGED_REQUEST: {
- const struct tb_xdp_properties_changed *xchg =
- (const struct tb_xdp_properties_changed *)pkg;
- struct tb_xdomain *xd;
+ case PROPERTIES_CHANGED_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain properties changed request\n",
+ route);
ret = tb_xdp_properties_changed_response(ctl, route, sequence);
@@ -502,20 +779,67 @@ static void tb_xdp_handle_request(struct work_struct *work)
* the xdomain related to this connection as well in
* case there is a change in services it offers.
*/
- xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
+ if (xd && device_is_registered(&xd->dev))
+ queue_delayed_work(tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+ break;
+
+ case UUID_REQUEST_OLD:
+ case UUID_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
+ ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+ /*
+ * If we've stopped the discovery with an error such as
+ * timing out, we will restart the handshake now that we
+ * received UUID request from the remote host.
+ */
+ if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
+ dev_dbg(&xd->dev, "restarting handshake\n");
+ start_handshake(xd);
+ }
+ break;
+
+ case LINK_STATE_STATUS_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain link state status request\n",
+ route);
+
if (xd) {
- queue_delayed_work(tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(50));
- tb_xdomain_put(xd);
+ ret = tb_xdp_link_state_status_response(tb, ctl, xd,
+ sequence);
+ } else {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_READY);
}
+ break;
+ case LINK_STATE_CHANGE_REQUEST:
+ tb_dbg(tb, "%llx: received XDomain link state change request\n",
+ route);
+
+ if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
+ const struct tb_xdp_link_state_change *lsc =
+ (const struct tb_xdp_link_state_change *)pkg;
+
+ ret = tb_xdp_link_state_change_response(ctl, route,
+ sequence, 0);
+ xd->target_link_width = lsc->tlw;
+ queue_delayed_work(tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+ } else {
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_READY);
+ }
break;
- }
default:
+ tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_SUPPORTED);
break;
}
+ tb_xdomain_put(xd);
+
if (ret) {
tb_warn(tb, "failed to send XDomain response for %#x\n",
pkg->type);
@@ -524,9 +848,11 @@ static void tb_xdp_handle_request(struct work_struct *work)
out:
kfree(xw->pkg);
kfree(xw);
+
+ tb_domain_put(tb);
}
-static void
+static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
size_t size)
{
@@ -534,13 +860,18 @@ tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
xw = kmalloc(sizeof(*xw), GFP_KERNEL);
if (!xw)
- return;
+ return false;
INIT_WORK(&xw->work, tb_xdp_handle_request);
xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
- xw->tb = tb;
+ if (!xw->pkg) {
+ kfree(xw);
+ return false;
+ }
+ xw->tb = tb_domain_get(tb);
- queue_work(tb->wq, &xw->work);
+ schedule_work(&xw->work);
+ return true;
}
/**
@@ -548,6 +879,8 @@ tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
* @drv: Driver to register
*
* Registers new service driver from @drv to the bus.
+ *
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_register_service_driver(struct tb_service_driver *drv)
{
@@ -558,7 +891,7 @@ EXPORT_SYMBOL_GPL(tb_register_service_driver);
/**
* tb_unregister_service_driver() - Unregister XDomain service driver
- * @xdrv: Driver to unregister
+ * @drv: Driver to unregister
*
* Unregisters XDomain service driver from the bus.
*/
@@ -577,11 +910,11 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
* It should be null terminated but anything else is pretty much
* allowed.
*/
- return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+ return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
-static int get_modalias(struct tb_service *svc, char *buf, size_t size)
+static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
svc->prtcid, svc->prtcvers, svc->prtcrevs);
@@ -594,7 +927,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
/* Full buffer size except new line and null termination */
get_modalias(svc, buf, PAGE_SIZE - 2);
- return sprintf(buf, "%s\n", buf);
+ return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);
@@ -603,7 +936,7 @@ static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcid);
+ return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);
@@ -612,7 +945,7 @@ static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcvers);
+ return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);
@@ -621,7 +954,7 @@ static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "%u\n", svc->prtcrevs);
+ return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);
@@ -630,7 +963,7 @@ static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
- return sprintf(buf, "0x%08x\n", svc->prtcstns);
+ return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
@@ -644,7 +977,7 @@ static struct attribute *tb_service_attrs[] = {
NULL,
};
-static struct attribute_group tb_service_attr_group = {
+static const struct attribute_group tb_service_attr_group = {
.attrs = tb_service_attrs,
};
@@ -653,9 +986,9 @@ static const struct attribute_group *tb_service_attr_groups[] = {
NULL,
};
-static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
char modalias[64];
get_modalias(svc, modalias, sizeof(modalias));
@@ -667,12 +1000,13 @@ static void tb_service_release(struct device *dev)
struct tb_service *svc = container_of(dev, struct tb_service, dev);
struct tb_xdomain *xd = tb_service_parent(svc);
- ida_simple_remove(&xd->service_ids, svc->id);
+ tb_service_debugfs_remove(svc);
+ ida_free(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
}
-struct device_type tb_service_type = {
+const struct device_type tb_service_type = {
.name = "thunderbolt_service",
.groups = tb_service_attr_groups,
.uevent = tb_service_uevent,
@@ -689,14 +1023,14 @@ static int remove_missing_service(struct device *dev, void *data)
if (!svc)
return 0;
- if (!tb_property_find(xd->properties, svc->key,
+ if (!tb_property_find(xd->remote_properties, svc->key,
TB_PROPERTY_TYPE_DIRECTORY))
device_unregister(dev);
return 0;
}
-static int find_service(struct device *dev, void *data)
+static int find_service(struct device *dev, const void *data)
{
const struct tb_property *p = data;
struct tb_service *svc;
@@ -740,6 +1074,7 @@ static void enumerate_services(struct tb_xdomain *xd)
struct tb_service *svc;
struct tb_property *p;
struct device *dev;
+ int id;
/*
* First remove all services that are not available anymore in
@@ -748,7 +1083,7 @@ static void enumerate_services(struct tb_xdomain *xd)
device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
/* Then re-enumerate properties creating new services as we go */
- tb_property_for_each(xd->properties, p) {
+ tb_property_for_each(xd->remote_properties, p) {
if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
continue;
@@ -768,12 +1103,20 @@ static void enumerate_services(struct tb_xdomain *xd)
break;
}
- svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xd->service_ids, GFP_KERNEL);
+ if (id < 0) {
+ kfree(svc->key);
+ kfree(svc);
+ break;
+ }
+ svc->id = id;
svc->dev.bus = &tb_bus_type;
svc->dev.type = &tb_service_type;
svc->dev.parent = &xd->dev;
dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
+ tb_service_debugfs_init(svc);
+
if (device_register(&svc->dev)) {
put_device(&svc->dev);
break;
@@ -797,6 +1140,14 @@ static int populate_properties(struct tb_xdomain *xd,
return -EINVAL;
xd->vendor = p->value.immediate;
+ p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
+ /*
+ * USB4 inter-domain spec suggests using 15 as HopID if the
+ * other end does not announce it in a property. This is for
+ * TBT3 compatibility.
+ */
+ xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;
+
kfree(xd->device_name);
xd->device_name = NULL;
kfree(xd->vendor_name);
@@ -813,23 +1164,227 @@ static int populate_properties(struct tb_xdomain *xd,
return 0;
}
-/* Called with @xd->lock held */
-static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
+static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
- if (!xd->resume)
- return;
+ bool change = false;
+ struct tb_port *port;
+ int ret;
+
+ port = tb_xdomain_downstream_port(xd);
- xd->resume = false;
- if (xd->transmit_path) {
- dev_dbg(&xd->dev, "re-establishing DMA path\n");
- tb_domain_approve_xdomain_paths(xd->tb, xd);
+ ret = tb_port_get_link_speed(port);
+ if (ret < 0)
+ return ret;
+
+ if (xd->link_speed != ret)
+ change = true;
+
+ xd->link_speed = ret;
+
+ ret = tb_port_get_link_width(port);
+ if (ret < 0)
+ return ret;
+
+ if (xd->link_width != ret)
+ change = true;
+
+ xd->link_width = ret;
+
+ if (change)
+ kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
+
+ return 0;
+}
+
+static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
+{
+ struct tb *tb = xd->tb;
+ uuid_t uuid;
+ u64 route;
+ int ret;
+
+ dev_dbg(&xd->dev, "requesting remote UUID\n");
+
+ ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
+ &route);
+ if (ret < 0) {
+ if (xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
+ return -EAGAIN;
+ }
+ dev_dbg(&xd->dev, "failed to read remote UUID\n");
+ return ret;
+ }
+
+ dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
+
+ if (uuid_equal(&uuid, xd->local_uuid)) {
+ if (route == xd->route)
+ dev_dbg(&xd->dev, "loop back detected\n");
+ else
+ dev_dbg(&xd->dev, "intra-domain loop detected\n");
+
+ /* Don't bond lanes automatically for loops */
+ xd->bonding_possible = false;
+ }
+
+ /*
+ * If the UUID is different, there is another domain connected
+ * so mark this one unplugged and wait for the connection
+ * manager to replace it.
+ */
+ if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
+ dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
+ xd->is_unplugged = true;
+ return -ENODEV;
+ }
+
+ /* First time fill in the missing UUID */
+ if (!xd->remote_uuid) {
+ xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->remote_uuid)
+ return -ENOMEM;
}
+
+ return 0;
}
-static void tb_xdomain_get_properties(struct work_struct *work)
+static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
+{
+ struct tb *tb = xd->tb;
+ u8 slw, tlw, sls, tls;
+ int ret;
+
+ dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
+ xd->remote_uuid);
+
+ ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
+ xd->state_retries, &slw, &tlw, &sls,
+ &tls);
+ if (ret) {
+ if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to request remote link status, retrying\n");
+ return -EAGAIN;
+ }
+ dev_dbg(&xd->dev, "failed to receive remote link status\n");
+ return ret;
+ }
+
+ dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
+
+ if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
+ dev_dbg(&xd->dev, "remote adapter is single lane only\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
+ unsigned int width)
+{
+ struct tb_port *port = tb_xdomain_downstream_port(xd);
+ struct tb *tb = xd->tb;
+ u8 tlw, tls;
+ u32 val;
+ int ret;
+
+ if (width == 2)
+ tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
+ else if (width == 1)
+ tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
+ else
+ return -EINVAL;
+
+ /* Use the current target speed */
+ ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+ tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+
+ dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
+ tlw, tls);
+
+ ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
+ xd->state_retries, tlw, tls);
+ if (ret) {
+ if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to change remote link state, retrying\n");
+ return -EAGAIN;
+ }
+ dev_err(&xd->dev, "failed request link state change, aborting\n");
+ return ret;
+ }
+
+ dev_dbg(&xd->dev, "received link state change response\n");
+ return 0;
+}
+
+static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
+{
+ unsigned int width, width_mask;
+ struct tb_port *port;
+ int ret;
+
+ if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
+ width = TB_LINK_WIDTH_SINGLE;
+ width_mask = width;
+ } else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
+ width = TB_LINK_WIDTH_DUAL;
+ width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
+ } else {
+ if (xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "link state change request not received yet, retrying\n");
+ return -EAGAIN;
+ }
+ dev_dbg(&xd->dev, "timeout waiting for link change request\n");
+ return -ETIMEDOUT;
+ }
+
+ port = tb_xdomain_downstream_port(xd);
+
+ /*
+ * We can't use tb_xdomain_lane_bonding_enable() here because it
+ * is the other side that initiates lane bonding. So here we
+ * just set the width to both lane adapters and wait for the
+ * link to transition bonded.
+ */
+ ret = tb_port_set_link_width(port->dual_link_port, width);
+ if (ret) {
+ tb_port_warn(port->dual_link_port,
+ "failed to set link width to %d\n", width);
+ return ret;
+ }
+
+ ret = tb_port_set_link_width(port, width);
+ if (ret) {
+ tb_port_warn(port, "failed to set link width to %d\n", width);
+ return ret;
+ }
+
+ ret = tb_port_wait_for_link_width(port, width_mask,
+ XDOMAIN_BONDING_TIMEOUT);
+ if (ret) {
+ dev_warn(&xd->dev, "error waiting for link width to become %d\n",
+ width_mask);
+ return ret;
+ }
+
+ port->bonded = width > TB_LINK_WIDTH_SINGLE;
+ port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;
+
+ tb_port_update_credits(port);
+ tb_xdomain_update_link_attributes(xd);
+
+ dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
+ return 0;
+}
+
+static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
- struct tb_xdomain *xd = container_of(work, typeof(*xd),
- get_properties_work.work);
struct tb_property_dir *dir;
struct tb *tb = xd->tb;
bool update = false;
@@ -837,41 +1392,36 @@ static void tb_xdomain_get_properties(struct work_struct *work)
u32 gen = 0;
int ret;
+ dev_dbg(&xd->dev, "requesting remote properties\n");
+
ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
- xd->remote_uuid, xd->properties_retries,
+ xd->remote_uuid, xd->state_retries,
&block, &gen);
if (ret < 0) {
- if (xd->properties_retries-- > 0) {
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
- } else {
- /* Give up now */
- dev_err(&xd->dev,
- "failed read XDomain properties from %pUb\n",
- xd->remote_uuid);
+ if (xd->state_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to request remote properties, retrying\n");
+ return -EAGAIN;
}
- return;
- }
+ /* Give up now */
+ dev_err(&xd->dev, "failed read XDomain properties from %pUb\n",
+ xd->remote_uuid);
- xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
+ return ret;
+ }
mutex_lock(&xd->lock);
/* Only accept newer generation properties */
- if (xd->properties && gen <= xd->property_block_gen) {
- /*
- * On resume it is likely that the properties block is
- * not changed (unless the other end added or removed
- * services). However, we need to make sure the existing
- * DMA paths are restored properly.
- */
- tb_xdomain_restore_paths(xd);
+ if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
+ ret = 0;
goto err_free_block;
}
dir = tb_property_parse_dir(block, ret);
if (!dir) {
dev_err(&xd->dev, "failed to parse XDomain properties\n");
+ ret = -ENOMEM;
goto err_free_block;
}
@@ -882,15 +1432,15 @@ static void tb_xdomain_get_properties(struct work_struct *work)
}
/* Release the existing one */
- if (xd->properties) {
- tb_property_free_dir(xd->properties);
+ if (xd->remote_properties) {
+ tb_property_free_dir(xd->remote_properties);
update = true;
}
- xd->properties = dir;
- xd->property_block_gen = gen;
+ xd->remote_properties = dir;
+ xd->remote_property_block_gen = gen;
- tb_xdomain_restore_paths(xd);
+ tb_xdomain_update_link_attributes(xd);
mutex_unlock(&xd->lock);
@@ -902,22 +1452,237 @@ static void tb_xdomain_get_properties(struct work_struct *work)
* registered, we notify the userspace that it has changed.
*/
if (!update) {
+ /*
+ * Now disable lane 1 if bonding was not enabled. Do
+ * this only if bonding was possible at the beginning
+ * (that is we are the connection manager and there are
+ * two lanes).
+ */
+ if (xd->bonding_possible) {
+ struct tb_port *port;
+
+ port = tb_xdomain_downstream_port(xd);
+ if (!port->bonded)
+ tb_port_disable(port->dual_link_port);
+ }
+
+ dev_dbg(&xd->dev, "current link speed %u.0 Gb/s\n",
+ xd->link_speed);
+ dev_dbg(&xd->dev, "current link width %s\n",
+ tb_width_name(xd->link_width));
+
if (device_add(&xd->dev)) {
dev_err(&xd->dev, "failed to add XDomain device\n");
- return;
+ return -ENODEV;
}
+ dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
+ xd->vendor, xd->device);
+ if (xd->vendor_name && xd->device_name)
+ dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
+ xd->device_name);
+
+ tb_xdomain_debugfs_init(xd);
} else {
kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
}
enumerate_services(xd);
- return;
+ return 0;
err_free_dir:
tb_property_free_dir(dir);
err_free_block:
kfree(block);
mutex_unlock(&xd->lock);
+
+ return ret;
+}
+
+static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_UUID;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_LINK_STATUS;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_LINK_STATUS2;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
+{
+ if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
+ dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
+ xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
+ } else {
+ dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
+ xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
+ }
+
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_PROPERTIES;
+ xd->state_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
+{
+ xd->properties_changed_retries = XDOMAIN_RETRIES;
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_failed(struct tb_xdomain *xd)
+{
+ xd->state = XDOMAIN_STATE_ERROR;
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_state_work(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
+ int ret, state = xd->state;
+
+ if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
+ state > XDOMAIN_STATE_ERROR))
+ return;
+
+ dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
+
+ switch (state) {
+ case XDOMAIN_STATE_INIT:
+ if (xd->needs_uuid) {
+ tb_xdomain_queue_uuid(xd);
+ } else {
+ tb_xdomain_queue_properties_changed(xd);
+ tb_xdomain_queue_properties(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_UUID:
+ ret = tb_xdomain_get_uuid(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_failed(xd);
+ } else {
+ tb_xdomain_queue_properties_changed(xd);
+ if (xd->bonding_possible)
+ tb_xdomain_queue_link_status(xd);
+ else
+ tb_xdomain_queue_properties(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATUS:
+ ret = tb_xdomain_get_link_status(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+
+ /*
+ * If any of the lane bonding states fail we skip
+ * bonding completely and try to continue from
+ * reading properties.
+ */
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_bonding(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATE_CHANGE:
+ ret = tb_xdomain_link_state_change(xd, 2);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_link_status2(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_LINK_STATUS2:
+ ret = tb_xdomain_get_link_status(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ } else {
+ tb_xdomain_queue_bonding_uuid_low(xd);
+ }
+ break;
+
+ case XDOMAIN_STATE_BONDING_UUID_LOW:
+ tb_xdomain_lane_bonding_enable(xd);
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_BONDING_UUID_HIGH:
+ if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_PROPERTIES:
+ ret = tb_xdomain_get_properties(xd);
+ if (ret) {
+ if (ret == -EAGAIN)
+ goto retry_state;
+ tb_xdomain_failed(xd);
+ } else {
+ xd->state = XDOMAIN_STATE_ENUMERATED;
+ }
+ break;
+
+ case XDOMAIN_STATE_ENUMERATED:
+ tb_xdomain_queue_properties(xd);
+ break;
+
+ case XDOMAIN_STATE_ERROR:
+ dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
+ __stop_handshake(xd);
+ break;
+
+ default:
+ dev_warn(&xd->dev, "unexpected state %d\n", state);
+ break;
+ }
+
+ return;
+
+retry_state:
+ queue_delayed_work(xd->tb->wq, &xd->state_work,
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
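The resulting flow is INIT -> UUID -> (optionally LINK_STATUS -> LINK_STATE_CHANGE/LINK_STATUS2 or BONDING_UUID_HIGH -> lane bonding) -> PROPERTIES -> ENUMERATED. Bonding failures degrade gracefully to plain property discovery, whereas UUID or property failures move the connection to ERROR, where it stays until the remote host sends a new UUID request and the handshake is restarted.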
static void tb_xdomain_properties_changed(struct work_struct *work)
@@ -926,17 +1691,23 @@ static void tb_xdomain_properties_changed(struct work_struct *work)
properties_changed_work.work);
int ret;
+ dev_dbg(&xd->dev, "sending properties changed notification\n");
+
ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
xd->properties_changed_retries, xd->local_uuid);
if (ret) {
- if (xd->properties_changed_retries-- > 0)
+ if (xd->properties_changed_retries-- > 0) {
+ dev_dbg(&xd->dev,
+ "failed to send properties changed notification, retrying\n");
queue_delayed_work(xd->tb->wq,
&xd->properties_changed_work,
- msecs_to_jiffies(1000));
+ msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+ }
+ dev_err(&xd->dev, "failed to send properties changed notification\n");
return;
}
- xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+ xd->properties_changed_retries = XDOMAIN_RETRIES;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
@@ -944,7 +1715,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%#x\n", xd->device);
+ return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);
@@ -956,19 +1727,28 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
- ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+ ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
mutex_unlock(&xd->lock);
return ret;
}
static DEVICE_ATTR_RO(device_name);
+static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
+}
+static DEVICE_ATTR_RO(maxhopid);
+
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%#x\n", xd->vendor);
+ return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);
@@ -980,7 +1760,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
- ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+ ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
mutex_unlock(&xd->lock);
return ret;
@@ -992,20 +1772,88 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
- return sprintf(buf, "%pUb\n", xd->remote_uuid);
+ return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+ return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
+}
+
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
+
+static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ unsigned int width;
+
+ switch (xd->link_width) {
+ case TB_LINK_WIDTH_SINGLE:
+ case TB_LINK_WIDTH_ASYM_TX:
+ width = 1;
+ break;
+ case TB_LINK_WIDTH_DUAL:
+ width = 2;
+ break;
+ case TB_LINK_WIDTH_ASYM_RX:
+ width = 3;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return sysfs_emit(buf, "%u\n", width);
+}
+static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
+
+static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+ unsigned int width;
+
+ switch (xd->link_width) {
+ case TB_LINK_WIDTH_SINGLE:
+ case TB_LINK_WIDTH_ASYM_RX:
+ width = 1;
+ break;
+ case TB_LINK_WIDTH_DUAL:
+ width = 2;
+ break;
+ case TB_LINK_WIDTH_ASYM_TX:
+ width = 3;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ return sysfs_emit(buf, "%u\n", width);
+}
+static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
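(In the asymmetric widths the link runs three lanes in one direction and one in the other, which is why the corresponding attribute reports 3.)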
+
static struct attribute *xdomain_attrs[] = {
&dev_attr_device.attr,
&dev_attr_device_name.attr,
+ &dev_attr_maxhopid.attr,
+ &dev_attr_rx_lanes.attr,
+ &dev_attr_rx_speed.attr,
+ &dev_attr_tx_lanes.attr,
+ &dev_attr_tx_speed.attr,
&dev_attr_unique_id.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
NULL,
};
-static struct attribute_group xdomain_attr_group = {
+static const struct attribute_group xdomain_attr_group = {
.attrs = xdomain_attrs,
};
@@ -1020,7 +1868,10 @@ static void tb_xdomain_release(struct device *dev)
put_device(xd->dev.parent);
- tb_property_free_dir(xd->properties);
+ kfree(xd->local_property_block);
+ tb_property_free_dir(xd->remote_properties);
+ ida_destroy(&xd->out_hopids);
+ ida_destroy(&xd->in_hopids);
ida_destroy(&xd->service_ids);
kfree(xd->local_uuid);
@@ -1030,27 +1881,6 @@ static void tb_xdomain_release(struct device *dev)
kfree(xd);
}
-static void start_handshake(struct tb_xdomain *xd)
-{
- xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
- xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
-
- /* Start exchanging properties with the other host */
- queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
- msecs_to_jiffies(100));
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
-}
-
-static void stop_handshake(struct tb_xdomain *xd)
-{
- xd->properties_retries = 0;
- xd->properties_changed_retries = 0;
-
- cancel_delayed_work_sync(&xd->get_properties_work);
- cancel_delayed_work_sync(&xd->properties_changed_work);
-}
-
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
stop_handshake(tb_to_xdomain(dev));
@@ -1059,15 +1889,7 @@ static int __maybe_unused tb_xdomain_suspend(struct device *dev)
static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
- struct tb_xdomain *xd = tb_to_xdomain(dev);
-
- /*
- * Ask tb_xdomain_get_properties() restore any existing DMA
- * paths after properties are re-read.
- */
- xd->resume = true;
- start_handshake(xd);
-
+ start_handshake(tb_to_xdomain(dev));
return 0;
}
@@ -1075,30 +1897,82 @@ static const struct dev_pm_ops tb_xdomain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
-struct device_type tb_xdomain_type = {
+const struct device_type tb_xdomain_type = {
.name = "thunderbolt_xdomain",
.release = tb_xdomain_release,
.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);
+static void tb_xdomain_link_init(struct tb_xdomain *xd, struct tb_port *down)
+{
+ if (!down->dual_link_port)
+ return;
+
+ /*
+ * Gen 4 links come up already as bonded so only update the port
+ * structures here.
+ */
+ if (tb_port_get_link_generation(down) >= 4) {
+ down->bonded = true;
+ down->dual_link_port->bonded = true;
+ } else {
+ xd->bonding_possible = true;
+ }
+}
+
+static void tb_xdomain_link_exit(struct tb_xdomain *xd)
+{
+ struct tb_port *down = tb_xdomain_downstream_port(xd);
+
+ if (!down->dual_link_port)
+ return;
+
+ if (tb_port_get_link_generation(down) >= 4) {
+ down->bonded = false;
+ down->dual_link_port->bonded = false;
+ } else if (xd->link_width > TB_LINK_WIDTH_SINGLE) {
+ /*
+ * Just return port structures back to way they were and
+ * update credits. No need to update userspace because
+ * the XDomain is removed soon anyway.
+ */
+ tb_port_lane_bonding_disable(down);
+ tb_port_update_credits(down);
+ } else if (down->dual_link_port) {
+ /*
+ * Re-enable the lane 1 adapter we disabled at the end
+ * of tb_xdomain_get_properties().
+ */
+ tb_port_enable(down->dual_link_port);
+ }
+}
+
/**
* tb_xdomain_alloc() - Allocate new XDomain object
* @tb: Domain where the XDomain belongs
- * @parent: Parent device (the switch through the connection to the
- * other domain is reached).
+ * @parent: Parent device (the switch through which the other domain
+ * is reached).
* @route: Route string used to reach the other domain
* @local_uuid: Our local domain UUID
- * @remote_uuid: UUID of the other domain
+ * @remote_uuid: UUID of the other domain (optional)
*
* Allocates new XDomain structure and returns pointer to that. The
* object must be released by calling tb_xdomain_put().
+ *
+ * Return: Pointer to &struct tb_xdomain, %NULL in case of failure.
*/
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
u64 route, const uuid_t *local_uuid,
const uuid_t *remote_uuid)
{
+ struct tb_switch *parent_sw = tb_to_switch(parent);
struct tb_xdomain *xd;
+ struct tb_port *down;
+
+ /* Make sure the downstream domain is accessible */
+ down = tb_port_at(route, parent_sw);
+ tb_port_unlock(down);
xd = kzalloc(sizeof(*xd), GFP_KERNEL);
if (!xd)
@@ -1106,9 +1980,12 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->tb = tb;
xd->route = route;
+ xd->local_max_hopid = down->config.max_in_hop_id;
ida_init(&xd->service_ids);
+ ida_init(&xd->in_hopids);
+ ida_init(&xd->out_hopids);
mutex_init(&xd->lock);
- INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+ INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
INIT_DELAYED_WORK(&xd->properties_changed_work,
tb_xdomain_properties_changed);
@@ -1116,9 +1993,16 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
if (!xd->local_uuid)
goto err_free;
- xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
- if (!xd->remote_uuid)
- goto err_free_local_uuid;
+ if (remote_uuid) {
+ xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
+ GFP_KERNEL);
+ if (!xd->remote_uuid)
+ goto err_free_local_uuid;
+ } else {
+ xd->needs_uuid = true;
+
+ tb_xdomain_link_init(xd, down);
+ }
device_initialize(&xd->dev);
xd->dev.parent = get_device(parent);
@@ -1127,6 +2011,10 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->dev.groups = xdomain_attr_groups;
dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
+ dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
+ if (remote_uuid)
+ dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);
+
/*
* This keeps the DMA powered on as long as we have active
* connection to another host.
@@ -1176,10 +2064,14 @@ static int unregister_service(struct device *dev, void *data)
*/
void tb_xdomain_remove(struct tb_xdomain *xd)
{
+ tb_xdomain_debugfs_remove(xd);
+
stop_handshake(xd);
device_for_each_child_reverse(&xd->dev, xd, unregister_service);
+ tb_xdomain_link_exit(xd);
+
/*
* Undo runtime PM here explicitly because it is possible that
* the XDomain was never added to the bus and thus device_del()
@@ -1189,80 +2081,218 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
pm_runtime_put_noidle(&xd->dev);
pm_runtime_set_suspended(&xd->dev);
- if (!device_is_registered(&xd->dev))
+ if (!device_is_registered(&xd->dev)) {
put_device(&xd->dev);
- else
+ } else {
+ dev_info(&xd->dev, "host disconnected\n");
device_unregister(&xd->dev);
+ }
}
/**
- * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
* @xd: XDomain connection
- * @transmit_path: HopID of the transmit path the other end is using to
- * send packets
- * @transmit_ring: DMA ring used to receive packets from the other end
- * @receive_path: HopID of the receive path the other end is using to
- * receive packets
- * @receive_ring: DMA ring used to send packets to the other end
*
- * The function enables DMA paths accordingly so that after successful
- * return the caller can send and receive packets using high-speed DMA
- * path.
+ * Lane bonding is disabled by default for XDomains. This function tries
+ * to enable bonding by first enabling the port and waiting for the CL0
+ * state.
*
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 on success, negative errno otherwise.
*/
-int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
- u16 transmit_ring, u16 receive_path,
- u16 receive_ring)
+int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
+ unsigned int width_mask;
+ struct tb_port *port;
int ret;
- mutex_lock(&xd->lock);
+ port = tb_xdomain_downstream_port(xd);
+ if (!port->dual_link_port)
+ return -ENODEV;
- if (xd->transmit_path) {
- ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
- goto exit_unlock;
+ ret = tb_port_enable(port->dual_link_port);
+ if (ret)
+ return ret;
+
+ ret = tb_wait_for_port(port->dual_link_port, true);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -ENOTCONN;
+
+ ret = tb_port_lane_bonding_enable(port);
+ if (ret) {
+ tb_port_warn(port, "failed to enable lane bonding\n");
+ return ret;
}
- xd->transmit_path = transmit_path;
- xd->transmit_ring = transmit_ring;
- xd->receive_path = receive_path;
- xd->receive_ring = receive_ring;
+ /* Any of the widths are all bonded */
+ width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
+ TB_LINK_WIDTH_ASYM_RX;
- ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
+ ret = tb_port_wait_for_link_width(port, width_mask,
+ XDOMAIN_BONDING_TIMEOUT);
+ if (ret) {
+ tb_port_warn(port, "failed to enable lane bonding\n");
+ return ret;
+ }
-exit_unlock:
- mutex_unlock(&xd->lock);
+ tb_port_update_credits(port);
+ tb_xdomain_update_link_attributes(xd);
- return ret;
+ dev_dbg(&xd->dev, "lane bonding enabled\n");
+ return 0;
}
-EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
+EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
/**
- * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
+ * tb_xdomain_lane_bonding_disable() - Disable lane bonding
* @xd: XDomain connection
*
- * This does the opposite of tb_xdomain_enable_paths(). After call to
- * this the caller is not expected to use the rings anymore.
- *
- * Return: %0 in case of success and negative errno in case of error
+ * Lane bonding is disabled by default for XDomains. If bonding has been
+ * enabled, this function can be used to disable it.
*/
-int tb_xdomain_disable_paths(struct tb_xdomain *xd)
+void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
- int ret = 0;
+ struct tb_port *port;
- mutex_lock(&xd->lock);
- if (xd->transmit_path) {
- xd->transmit_path = 0;
- xd->transmit_ring = 0;
- xd->receive_path = 0;
- xd->receive_ring = 0;
+ port = tb_xdomain_downstream_port(xd);
+ if (port->dual_link_port) {
+ int ret;
+
+ tb_port_lane_bonding_disable(port);
+ ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
+ if (ret == -ETIMEDOUT)
+ tb_port_warn(port, "timeout disabling lane bonding\n");
+ tb_port_disable(port->dual_link_port);
+ tb_port_update_credits(port);
+ tb_xdomain_update_link_attributes(xd);
- ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
+ dev_dbg(&xd->dev, "lane bonding disabled\n");
}
- mutex_unlock(&xd->lock);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
- return ret;
+/**
+ * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
+ * @xd: XDomain connection
+ * @hopid: Preferred HopID or %-1 for next available
+ *
+ * Returned HopID is guaranteed to be within range supported by the input
+ * lane adapter.
+ * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
+ *
+ * Return:
+ * * Allocated HopID - On success.
+ * * %-ENOSPC - If there are no more available HopIDs.
+ * * Negative errno - Another error occurred.
+ */
+int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
+{
+ if (hopid < 0)
+ hopid = TB_PATH_MIN_HOPID;
+ if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
+ return -EINVAL;
+
+ return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
+ GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
+
+/**
+ * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
+ * @xd: XDomain connection
+ * @hopid: Preferred HopID or %-1 for next available
+ *
+ * Returned HopID is guaranteed to be within range supported by the
+ * output lane adapter.
+ * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
+ *
+ * Return:
+ * * Allocated HopID - On success.
+ * * %-ENOSPC - If there are no more available HopIDs.
+ * * Negative errno - Another error occurred.
+ */
+int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
+{
+ if (hopid < 0)
+ hopid = TB_PATH_MIN_HOPID;
+ if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
+ return -EINVAL;
+
+ return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
+ GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
+
+/**
+ * tb_xdomain_release_in_hopid() - Release input HopID
+ * @xd: XDomain connection
+ * @hopid: HopID to release
+ */
+void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
+{
+ ida_free(&xd->in_hopids, hopid);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
+
+/**
+ * tb_xdomain_release_out_hopid() - Release output HopID
+ * @xd: XDomain connection
+ * @hopid: HopID to release
+ */
+void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
+{
+ ida_free(&xd->out_hopids, hopid);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
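To illustrate the new HopID API (a rough sketch only, not part of this patch; the example_* names are made up and error handling is trimmed), a service driver reserves one HopID per direction before enabling its DMA paths:

/* Illustrative sketch only; the example_* helpers are hypothetical. */
static int example_reserve_hopids(struct tb_xdomain *xd, int *in, int *out)
{
	int ret;

	/* -1 asks for the next free HopID within the adapter's range */
	ret = tb_xdomain_alloc_in_hopid(xd, -1);
	if (ret < 0)
		return ret;
	*in = ret;	/* the remote end sends to us using this HopID */

	ret = tb_xdomain_alloc_out_hopid(xd, -1);
	if (ret < 0) {
		tb_xdomain_release_in_hopid(xd, *in);
		return ret;
	}
	*out = ret;	/* we send to the remote end using this HopID */

	return 0;
}

In practice the service protocol also has to communicate these values to the other host so that both sides program matching paths.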
+
+/**
+ * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
+ *
+ * The function enables DMA paths accordingly so that after successful
+ * return the caller can send and receive packets using high-speed DMA
+ * path. If a transmit or receive path is not needed, pass %-1 for those
+ * parameters.
+ *
+ * Return: %0 on success, negative errno otherwise.
+ */
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
+ int transmit_ring, int receive_path,
+ int receive_ring)
+{
+ return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
+ transmit_ring, receive_path,
+ receive_ring);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
+
+/**
+ * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
+ *
+ * This does the opposite of tb_xdomain_enable_paths(). After call to
+ * this the caller is not expected to use the rings anymore. Passing %-1
+ * as path/ring parameter means don't care. Normally the callers should
+ * pass the same values here as they do when paths are enabled.
+ *
+ * Return: %0 on success, negative errno otherwise.
+ */
+int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
+ int transmit_ring, int receive_path,
+ int receive_ring)
+{
+ return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
+ transmit_ring, receive_path,
+ receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
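Continuing the sketch above (hypothetical helpers again; ring allocation with tb_ring_alloc_tx()/tb_ring_alloc_rx() and the service-level negotiation are elided), the paths are enabled with those HopIDs and torn down with the same values, as the kernel-doc above asks:

/* Illustrative sketch only; tx_ring/rx_ring are the NHI ring HopIDs. */
static int example_enable(struct tb_xdomain *xd, int in_hopid, int out_hopid,
			  int tx_ring, int rx_ring)
{
	/* transmit = HopID we send on, receive = HopID the peer sends on */
	return tb_xdomain_enable_paths(xd, out_hopid, tx_ring,
				       in_hopid, rx_ring);
}

static void example_disable(struct tb_xdomain *xd, int in_hopid, int out_hopid,
			    int tx_ring, int rx_ring)
{
	/* Pass the same values that were used when enabling the paths */
	tb_xdomain_disable_paths(xd, out_hopid, tx_ring, in_hopid, rx_ring);
	tb_xdomain_release_out_hopid(xd, out_hopid);
	tb_xdomain_release_in_hopid(xd, in_hopid);
}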
@@ -1276,30 +2306,26 @@ struct tb_xdomain_lookup {
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
const struct tb_xdomain_lookup *lookup)
{
- int i;
+ struct tb_port *port;
- for (i = 1; i <= sw->config.max_port_number; i++) {
- struct tb_port *port = &sw->ports[i];
+ tb_switch_for_each_port(sw, port) {
struct tb_xdomain *xd;
- if (tb_is_upstream_port(port))
- continue;
-
if (port->xdomain) {
xd = port->xdomain;
if (lookup->uuid) {
- if (uuid_equal(xd->remote_uuid, lookup->uuid))
+ if (xd->remote_uuid &&
+ uuid_equal(xd->remote_uuid, lookup->uuid))
+ return xd;
+ } else {
+ if (lookup->link && lookup->link == xd->link &&
+ lookup->depth == xd->depth)
+ return xd;
+ if (lookup->route && lookup->route == xd->route)
return xd;
- } else if (lookup->link &&
- lookup->link == xd->link &&
- lookup->depth == xd->depth) {
- return xd;
- } else if (lookup->route &&
- lookup->route == xd->route) {
- return xd;
}
- } else if (port->remote) {
+ } else if (tb_port_has_remote(port)) {
xd = switch_find_xdomain(port->remote->sw, lookup);
if (xd)
return xd;
@@ -1323,6 +2349,8 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
+ *
+ * Return: Pointer to &struct tb_xdomain or %NULL if not found.
*/
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
@@ -1352,6 +2380,8 @@ EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
+ *
+ * Return: Pointer to &struct tb_xdomain or %NULL if not found.
*/
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth)
@@ -1381,6 +2411,8 @@ struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
+ *
+ * Return: Pointer to &struct tb_xdomain or %NULL if not found.
*/
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
@@ -1416,10 +2448,8 @@ bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
* handlers in turn.
*/
if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
- if (type == TB_CFG_PKG_XDOMAIN_REQ) {
- tb_xdp_schedule_request(tb, hdr, size);
- return true;
- }
+ if (type == TB_CFG_PKG_XDOMAIN_REQ)
+ return tb_xdp_schedule_request(tb, hdr, size);
return false;
}
@@ -1440,35 +2470,6 @@ bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
return ret > 0;
}
-static int rebuild_property_block(void)
-{
- u32 *block, len;
- int ret;
-
- ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
- if (ret < 0)
- return ret;
-
- len = ret;
-
- block = kcalloc(len, sizeof(u32), GFP_KERNEL);
- if (!block)
- return -ENOMEM;
-
- ret = tb_property_format_dir(xdomain_property_dir, block, len);
- if (ret) {
- kfree(block);
- return ret;
- }
-
- kfree(xdomain_property_block);
- xdomain_property_block = block;
- xdomain_property_block_len = len;
- xdomain_property_block_gen++;
-
- return 0;
-}
-
static int update_xdomain(struct device *dev, void *data)
{
struct tb_xdomain *xd;
@@ -1510,7 +2511,7 @@ static bool remove_directory(const char *key, const struct tb_property_dir *dir)
* notified so they can re-read properties of this host if they are
* interested.
*
- * Return: %0 on success and negative errno on failure
+ * Return: %0 on success, negative errno otherwise.
*/
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
@@ -1533,11 +2534,7 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
if (ret)
goto err_unlock;
- ret = rebuild_property_block();
- if (ret) {
- remove_directory(key, dir);
- goto err_unlock;
- }
+ xdomain_property_block_gen++;
mutex_unlock(&xdomain_lock);
update_all_xdomains();
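For reference, a hedged sketch of how a service driver publishes its property directory (the UUID, key and property values below are hypothetical):

/* Illustrative sketch only; UUID, key and values are made up. */
static const uuid_t example_svc_uuid =
	UUID_INIT(0x12345678, 0x1234, 0x1234,
		  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);
static struct tb_property_dir *example_dir;

static int example_register_properties(void)
{
	int ret;

	example_dir = tb_property_create_dir(&example_svc_uuid);
	if (!example_dir)
		return -ENOMEM;

	tb_property_add_immediate(example_dir, "prtcid", 1);
	tb_property_add_immediate(example_dir, "prtcvers", 1);
	tb_property_add_immediate(example_dir, "prtcrevs", 1);
	tb_property_add_immediate(example_dir, "prtcstns", 0);

	/* Bumps the block generation and notifies connected hosts */
	ret = tb_register_property_dir("example", example_dir);
	if (ret)
		tb_property_free_dir(example_dir);
	return ret;
}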
@@ -1563,7 +2560,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
mutex_lock(&xdomain_lock);
if (remove_directory(key, dir))
- ret = rebuild_property_block();
+ xdomain_property_block_gen++;
mutex_unlock(&xdomain_lock);
if (!ret)
@@ -1573,8 +2570,6 @@ EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
int tb_xdomain_init(void)
{
- int ret;
-
xdomain_property_dir = tb_property_create_dir(NULL);
if (!xdomain_property_dir)
return -ENOMEM;
@@ -1583,26 +2578,20 @@ int tb_xdomain_init(void)
* Initialize standard set of properties without any service
* directories. Those will be added by service drivers
* themselves when they are loaded.
+ *
+ * Rest of the properties are filled dynamically based on these
+ * when the P2P connection is made.
*/
- tb_property_add_immediate(xdomain_property_dir, "vendorid",
- PCI_VENDOR_ID_INTEL);
- tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
- tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
- tb_property_add_text(xdomain_property_dir, "deviceid",
- utsname()->nodename);
+ tb_property_add_immediate(xdomain_property_dir, "vendorid", 0x1d6b);
+ tb_property_add_text(xdomain_property_dir, "vendorid", "Linux");
+ tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x0004);
tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
- ret = rebuild_property_block();
- if (ret) {
- tb_property_free_dir(xdomain_property_dir);
- xdomain_property_dir = NULL;
- }
-
- return ret;
+ xdomain_property_block_gen = get_random_u32();
+ return 0;
}
void tb_xdomain_exit(void)
{
- kfree(xdomain_property_block);
tb_property_free_dir(xdomain_property_dir);
}