author:    Mika Westerberg <mika.westerberg@linux.intel.com>  2018-09-28 16:30:16 +0300
committer: Mika Westerberg <mika.westerberg@linux.intel.com>  2019-04-18 11:18:53 +0300
commit:    3b4b3235ca5bf1b69ff15a269d9881b2604dd4fa
tree:      898f4be72c6341c783cbfcc23a0baca7500e9fd2
parent:    559c1e1e013437bf190469efbcbd8bc803285853
thunderbolt: Add XDomain UUID exchange support
Currently the ICM firmware has been handling the XDomain UUID exchange, so there has been no need to implement it in the driver. However, since we are now going to add the same capability to the software connection manager, it needs to be handled properly. For this reason, modify the driver's XDomain protocol handling so that, if the remote domain UUID is not filled in, the core queries it first and only then starts the normal property exchange flow.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
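With this change a connection manager no longer has to know the remote UUID up front: it can pass a NULL remote UUID to tb_xdomain_alloc() and let the core query it before the property exchange starts. The following is only a rough usage sketch, not part of this patch; the handler name and the way the route and port are obtained are illustrative assumptions.

	/*
	 * Illustrative sketch (not part of this patch): how a software
	 * connection manager might create an XDomain when the remote UUID
	 * is not yet known. Passing NULL as remote_uuid makes the core set
	 * xd->needs_uuid and query the UUID itself from get_uuid_work
	 * before starting the property exchange.
	 */
	static void example_xdomain_connected(struct tb *tb, struct tb_switch *sw,
					      struct tb_port *port)
	{
		struct tb_xdomain *xd;
		u64 route = tb_downstream_route(port);

		xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
				      NULL /* remote UUID resolved by the core */);
		if (!xd)
			return;

		port->xdomain = xd;
		tb_xdomain_add(xd);
	}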
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/tb_msgs.h |  11
-rw-r--r--  drivers/thunderbolt/xdomain.c | 136
2 files changed, 137 insertions(+), 10 deletions(-)
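Because the diffstat above is limited to drivers/thunderbolt, the companion change to struct tb_xdomain in include/linux/thunderbolt.h is not shown. Based on the fields referenced in the diff below (xd->get_uuid_work, xd->uuid_retries, xd->needs_uuid), the new members would look roughly like this sketch; their exact placement in the struct is an assumption.

	/* Sketch of the new struct tb_xdomain members (declared in
	 * include/linux/thunderbolt.h, outside this diffstat view).
	 */
	struct delayed_work get_uuid_work;	/* queries the remote UUID when unknown */
	int uuid_retries;			/* starts at XDOMAIN_UUID_RETRIES */
	bool needs_uuid;			/* remote_uuid was not given at alloc time */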
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 02c84aa3d018..afbe1d29bb03 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -492,6 +492,17 @@ struct tb_xdp_header {
u32 type;
};
+struct tb_xdp_uuid {
+ struct tb_xdp_header hdr;
+};
+
+struct tb_xdp_uuid_response {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ u32 src_route_hi;
+ u32 src_route_lo;
+};
+
struct tb_xdp_properties {
struct tb_xdp_header hdr;
uuid_t src_uuid;
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 44d3b2e486cd..5118d46702d5 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -18,6 +18,7 @@
#include "tb.h"
#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */
+#define XDOMAIN_UUID_RETRIES 10
#define XDOMAIN_PROPERTIES_RETRIES 60
#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10
@@ -222,6 +223,50 @@ static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
return 0;
}
+static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
+ uuid_t *uuid)
+{
+ struct tb_xdp_uuid_response res;
+ struct tb_xdp_uuid req;
+ int ret;
+
+ memset(&req, 0, sizeof(req));
+ tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
+ sizeof(req));
+
+ memset(&res, 0, sizeof(res));
+ ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+ TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP,
+ XDOMAIN_DEFAULT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ ret = tb_xdp_handle_error(&res.hdr);
+ if (ret)
+ return ret;
+
+ uuid_copy(uuid, &res.src_uuid);
+ return 0;
+}
+
+static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+ const uuid_t *uuid)
+{
+ struct tb_xdp_uuid_response res;
+
+ memset(&res, 0, sizeof(res));
+ tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
+ sizeof(res));
+
+ uuid_copy(&res.src_uuid, uuid);
+ res.src_route_hi = upper_32_bits(route);
+ res.src_route_lo = lower_32_bits(route);
+
+ return __tb_xdomain_response(ctl, &res, sizeof(res),
+ TB_CFG_PKG_XDOMAIN_RESP);
+}
+
static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
enum tb_xdp_error error)
{
@@ -512,7 +557,14 @@ static void tb_xdp_handle_request(struct work_struct *work)
break;
}
+ case UUID_REQUEST_OLD:
+ case UUID_REQUEST:
+ ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+ break;
+
default:
+ tb_xdp_error_response(ctl, route, sequence,
+ ERROR_NOT_SUPPORTED);
break;
}
@@ -839,6 +891,55 @@ static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
}
}
+static void tb_xdomain_get_uuid(struct work_struct *work)
+{
+ struct tb_xdomain *xd = container_of(work, typeof(*xd),
+ get_uuid_work.work);
+ struct tb *tb = xd->tb;
+ uuid_t uuid;
+ int ret;
+
+ ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+ if (ret < 0) {
+ if (xd->uuid_retries-- > 0) {
+ queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+ msecs_to_jiffies(100));
+ } else {
+ dev_dbg(&xd->dev, "failed to read remote UUID\n");
+ }
+ return;
+ }
+
+ if (uuid_equal(&uuid, xd->local_uuid)) {
+ dev_dbg(&xd->dev, "intra-domain loop detected\n");
+ return;
+ }
+
+ /*
+ * If the UUID is different, there is another domain connected
+ * so mark this one unplugged and wait for the connection
+ * manager to replace it.
+ */
+ if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
+ dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
+ xd->is_unplugged = true;
+ return;
+ }
+
+ /* First time fill in the missing UUID */
+ if (!xd->remote_uuid) {
+ xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
+ if (!xd->remote_uuid)
+ return;
+ }
+
+ /* Now we can start the normal properties exchange */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+}
+
static void tb_xdomain_get_properties(struct work_struct *work)
{
struct tb_xdomain *xd = container_of(work, typeof(*xd),
@@ -1045,21 +1146,29 @@ static void tb_xdomain_release(struct device *dev)
static void start_handshake(struct tb_xdomain *xd)
{
+ xd->uuid_retries = XDOMAIN_UUID_RETRIES;
xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
- /* Start exchanging properties with the other host */
- queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
- msecs_to_jiffies(100));
- queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
- msecs_to_jiffies(1000));
+ if (xd->needs_uuid) {
+ queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
+ msecs_to_jiffies(100));
+ } else {
+ /* Start exchanging properties with the other host */
+ queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+ msecs_to_jiffies(100));
+ queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
+ msecs_to_jiffies(1000));
+ }
}
static void stop_handshake(struct tb_xdomain *xd)
{
+ xd->uuid_retries = 0;
xd->properties_retries = 0;
xd->properties_changed_retries = 0;
+ cancel_delayed_work_sync(&xd->get_uuid_work);
cancel_delayed_work_sync(&xd->get_properties_work);
cancel_delayed_work_sync(&xd->properties_changed_work);
}
@@ -1102,7 +1211,7 @@ EXPORT_SYMBOL_GPL(tb_xdomain_type);
* other domain is reached).
* @route: Route string used to reach the other domain
* @local_uuid: Our local domain UUID
- * @remote_uuid: UUID of the other domain
+ * @remote_uuid: UUID of the other domain (optional)
*
* Allocates new XDomain structure and returns pointer to that. The
* object must be released by calling tb_xdomain_put().
@@ -1121,6 +1230,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
xd->route = route;
ida_init(&xd->service_ids);
mutex_init(&xd->lock);
+ INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
INIT_DELAYED_WORK(&xd->properties_changed_work,
tb_xdomain_properties_changed);
@@ -1129,9 +1239,14 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
if (!xd->local_uuid)
goto err_free;
- xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
- if (!xd->remote_uuid)
- goto err_free_local_uuid;
+ if (remote_uuid) {
+ xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
+ GFP_KERNEL);
+ if (!xd->remote_uuid)
+ goto err_free_local_uuid;
+ } else {
+ xd->needs_uuid = true;
+ }
device_initialize(&xd->dev);
xd->dev.parent = get_device(parent);
@@ -1299,7 +1414,8 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
xd = port->xdomain;
if (lookup->uuid) {
- if (uuid_equal(xd->remote_uuid, lookup->uuid))
+ if (xd->remote_uuid &&
+ uuid_equal(xd->remote_uuid, lookup->uuid))
return xd;
} else if (lookup->link &&
lookup->link == xd->link &&