author	Mika Westerberg <mika.westerberg@linux.intel.com>	2021-01-08 12:55:49 +0200
committer	Mika Westerberg <mika.westerberg@linux.intel.com>	2021-03-18 18:25:30 +0300
commit	8ccbed2476f2a615d5045a7c5c7b459db7dd9263 (patch)
tree	516365daaa03a29687d63ebc680e26bc07026df2 /drivers/thunderbolt
parent	d29c59b1a4dc74ab0b27c540f39e766906d30e29 (diff)
thunderbolt: Do not re-establish XDomain DMA paths automatically
This step is actually not needed. The service drivers themselves will handle this once they have negotiated the service up and running again with the remote side. Also dropping this makes it easier to add support for multiple DMA tunnels over a single XDomain connection.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
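For context, a minimal sketch of what "the service drivers themselves will handle this" can look like on the driver side: once the service has been re-negotiated with the remote end, the driver sets up its own DMA paths through tb_xdomain_enable_paths(), which the thunderbolt core exports at this point. The struct, callback name and path/ring fields below are illustrative assumptions, not taken from this patch or from any in-tree driver.

#include <linux/device.h>
#include <linux/thunderbolt.h>

/* Hypothetical per-service state; field names are made up for the example */
struct example_service {
	struct tb_xdomain *xd;
	u16 transmit_path;
	u16 transmit_ring;
	u16 receive_path;
	u16 receive_ring;
};

/*
 * Hypothetical callback run after the service handshake with the remote
 * side has completed again (for example after resume). The driver, not
 * the XDomain core, re-establishes the DMA paths it needs.
 */
static int example_service_handshake_done(struct example_service *svc)
{
	struct tb_xdomain *xd = svc->xd;
	int ret;

	ret = tb_xdomain_enable_paths(xd, svc->transmit_path,
				      svc->transmit_ring,
				      svc->receive_path,
				      svc->receive_ring);
	if (ret)
		dev_err(&xd->dev, "failed to enable DMA paths: %d\n", ret);

	return ret;
}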
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--	drivers/thunderbolt/xdomain.c	35
1 file changed, 2 insertions(+), 33 deletions(-)
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 584bb5ec06f8..a1657663a95e 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -946,19 +946,6 @@ static int populate_properties(struct tb_xdomain *xd,
return 0;
}
-/* Called with @xd->lock held */
-static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
-{
- if (!xd->resume)
- return;
-
- xd->resume = false;
- if (xd->transmit_path) {
- dev_dbg(&xd->dev, "re-establishing DMA path\n");
- tb_domain_approve_xdomain_paths(xd->tb, xd);
- }
-}
-
static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
{
return tb_to_switch(xd->dev.parent);
@@ -1084,16 +1071,8 @@ static void tb_xdomain_get_properties(struct work_struct *work)
mutex_lock(&xd->lock);
/* Only accept newer generation properties */
- if (xd->properties && gen <= xd->property_block_gen) {
- /*
- * On resume it is likely that the properties block is
- * not changed (unless the other end added or removed
- * services). However, we need to make sure the existing
- * DMA paths are restored properly.
- */
- tb_xdomain_restore_paths(xd);
+ if (xd->properties && gen <= xd->property_block_gen)
goto err_free_block;
- }
dir = tb_property_parse_dir(block, ret);
if (!dir) {
@@ -1118,8 +1097,6 @@ static void tb_xdomain_get_properties(struct work_struct *work)
tb_xdomain_update_link_attributes(xd);
- tb_xdomain_restore_paths(xd);
-
mutex_unlock(&xd->lock);
kfree(block);
@@ -1332,15 +1309,7 @@ static int __maybe_unused tb_xdomain_suspend(struct device *dev)
static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
- struct tb_xdomain *xd = tb_to_xdomain(dev);
-
- /*
- * Ask tb_xdomain_get_properties() restore any existing DMA
- * paths after properties are re-read.
- */
- xd->resume = true;
- start_handshake(xd);
-
+ start_handshake(tb_to_xdomain(dev));
return 0;
}