author	Mika Westerberg <mika.westerberg@linux.intel.com>	2021-01-14 20:27:58 +0300
committer	Mika Westerberg <mika.westerberg@linux.intel.com>	2021-03-18 18:25:31 +0300
commit	e5876559b579975e054fdf747c08077628fad175 (patch)
tree	a5f3fb845eb4cea7645043b1e4d1c48cceab7637 /drivers/thunderbolt/tunnel.c
parent	46b494f286812a88caba28dd0810cf3a55747431 (diff)
thunderbolt: Use dedicated flow control for DMA tunnels
The USB4 inter-domain service spec recommends using a dedicated flow control scheme, so update the driver accordingly.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
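For context, "dedicated flow control" here means the DMA tunnel paths keep hop-by-hop (ingress) flow control enabled while no longer drawing on shared buffer space, which is why tb_dma_init_path() drops its isb argument and tb_dma_activate() (which only programmed initial buffer credits on the NHI port) can go away. The stand-alone C sketch below models just that field setup; struct model_path and the numeric enum values are simplified assumptions for illustration, and only the field names and egress masks mirror the diff.

/*
 * Illustrative stand-alone model of the flow control setup in this patch.
 * struct model_path and the enum values are simplified assumptions; only
 * the field names mirror struct tb_path as used in drivers/thunderbolt/tunnel.c.
 */
#include <stdio.h>

enum {
	TB_PATH_NONE		= 0,
	TB_PATH_SOURCE		= 1,
	TB_PATH_INTERNAL	= 2,
	TB_PATH_ALL		= 7,	/* assumed values, for illustration only */
};

struct model_path {
	unsigned int ingress_fc_enable;
	unsigned int egress_fc_enable;
	unsigned int ingress_shared_buffer;
	unsigned int egress_shared_buffer;
};

/* Mirrors the new tb_dma_init_path(): no isb argument, no shared buffering */
static void model_dma_init_path(struct model_path *path, unsigned int efc)
{
	path->egress_fc_enable = efc;
	path->ingress_fc_enable = TB_PATH_ALL;		/* hop-by-hop credits stay on */
	path->egress_shared_buffer = TB_PATH_NONE;
	path->ingress_shared_buffer = TB_PATH_NONE;	/* previously the caller-supplied isb */
}

int main(void)
{
	struct model_path a = { 0 }, b = { 0 };

	/* The two egress masks used by the call sites in tb_tunnel_alloc_dma() */
	model_dma_init_path(&a, TB_PATH_SOURCE | TB_PATH_INTERNAL);
	model_dma_init_path(&b, TB_PATH_ALL);

	printf("a: ingress_shared_buffer=%u egress_fc_enable=%u\n",
	       a.ingress_shared_buffer, a.egress_fc_enable);
	printf("b: ingress_shared_buffer=%u egress_fc_enable=%u\n",
	       b.ingress_shared_buffer, b.egress_fc_enable);
	return 0;
}

Both call patterns end up with ingress_shared_buffer and egress_shared_buffer set to TB_PATH_NONE, which is the dedicated scheme this patch switches the DMA tunnels to.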
Diffstat (limited to 'drivers/thunderbolt/tunnel.c')
-rw-r--r--	drivers/thunderbolt/tunnel.c	20
1 file changed, 4 insertions(+), 16 deletions(-)
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6557b6e07009..2e7ec037a73e 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -794,24 +794,14 @@ static u32 tb_dma_credits(struct tb_port *nhi)
 	return min(max_credits, 13U);
 }
 
-static int tb_dma_activate(struct tb_tunnel *tunnel, bool active)
-{
-	struct tb_port *nhi = tunnel->src_port;
-	u32 credits;
-
-	credits = active ? tb_dma_credits(nhi) : 0;
-	return tb_port_set_initial_credits(nhi, credits);
-}
-
-static void tb_dma_init_path(struct tb_path *path, unsigned int isb,
-			     unsigned int efc, u32 credits)
+static void tb_dma_init_path(struct tb_path *path, unsigned int efc, u32 credits)
 {
 	int i;
 
 	path->egress_fc_enable = efc;
 	path->ingress_fc_enable = TB_PATH_ALL;
 	path->egress_shared_buffer = TB_PATH_NONE;
-	path->ingress_shared_buffer = isb;
+	path->ingress_shared_buffer = TB_PATH_NONE;
 	path->priority = 5;
 	path->weight = 1;
 	path->clear_fc = true;
@@ -856,7 +846,6 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 	if (!tunnel)
 		return NULL;
 
-	tunnel->activate = tb_dma_activate;
 	tunnel->src_port = nhi;
 	tunnel->dst_port = dst;
 
@@ -869,8 +858,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 			tb_tunnel_free(tunnel);
 			return NULL;
 		}
-		tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
-				 credits);
+		tb_dma_init_path(path, TB_PATH_SOURCE | TB_PATH_INTERNAL, credits);
 		tunnel->paths[i++] = path;
 	}
 
@@ -881,7 +869,7 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
 			tb_tunnel_free(tunnel);
 			return NULL;
 		}
-		tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
+		tb_dma_init_path(path, TB_PATH_ALL, credits);
 		tunnel->paths[i++] = path;
 	}
 