/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Thunderbolt driver - Tunneling support
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2019, Intel Corporation
 */

#ifndef TB_TUNNEL_H_
#define TB_TUNNEL_H_

#include "tb.h"

/**
 * enum tb_tunnel_type - Protocol carried by the tunnel
 * @TB_TUNNEL_PCI: PCIe tunnel
 * @TB_TUNNEL_DP: DisplayPort tunnel
 * @TB_TUNNEL_DMA: DMA tunnel between host interface (NHI) rings and an
 *		   adapter port
 * @TB_TUNNEL_USB3: USB3 tunnel
 */
enum tb_tunnel_type {
	TB_TUNNEL_PCI,
	TB_TUNNEL_DP,
	TB_TUNNEL_DMA,
	TB_TUNNEL_USB3,
};

/**
 * enum tb_tunnel_state - State of a tunnel
 * @TB_TUNNEL_INACTIVE: tb_tunnel_activate() is not called for the tunnel
 * @TB_TUNNEL_ACTIVATING: tb_tunnel_activate() returned successfully for the
 *			  tunnel but it is not fully active yet (for DP
 *			  tunnels this covers the window until the DPRX
 *			  capabilities read completes)
 * @TB_TUNNEL_ACTIVE: The tunnel is fully active
 */
enum tb_tunnel_state {
	TB_TUNNEL_INACTIVE,
	TB_TUNNEL_ACTIVATING,
	TB_TUNNEL_ACTIVE,
};

/**
 * struct tb_tunnel - Tunnel between two ports
 * @kref: Reference count
 * @tb: Pointer to the domain
 * @src_port: Source port of the tunnel
 * @dst_port: Destination port of the tunnel. For discovered incomplete
 *	      tunnels may be %NULL or null adapter port instead.
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @pre_activate: Optional tunnel specific initialization called before
 *		  activation. Can touch hardware.
 * @activate: Optional tunnel specific activation (@activate is %true) or
 *	      deactivation (@activate is %false) callback
 * @post_deactivate: Optional tunnel specific de-initialization called
 *		     after deactivation. Can touch hardware.
 * @destroy: Optional tunnel specific callback called when the tunnel
 *	     memory is being released. Should not touch hardware.
 * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
 * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
 * @alloc_bandwidth: Change tunnel bandwidth allocation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
 * @release_unused_bandwidth: Release all unused bandwidth
 * @reclaim_available_bandwidth: Reclaim back available bandwidth
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
 * @state: Current state of the tunnel
 * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
 * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
 *	      Only set if the bandwidth needs to be limited.
 * @allocated_up: Allocated upstream bandwidth (only for USB3)
 * @allocated_down: Allocated downstream bandwidth (only for USB3)
 * @bw_mode: DP bandwidth allocation mode registers can be used to
 *	     determine consumed and allocated bandwidth
 * @dprx_canceled: Was DPRX capabilities read poll canceled
 * @dprx_timeout: If set DPRX capabilities read poll work will timeout after this passes
 * @dprx_work: Worker that is scheduled to poll completion of DPRX capabilities read
 * @callback: Optional callback called when DP tunnel is fully activated
 * @callback_data: Optional data for @callback
 */
struct tb_tunnel {
	struct kref kref;
	struct tb *tb;
	struct tb_port *src_port;
	struct tb_port *dst_port;
	struct tb_path **paths;
	size_t npaths;
	int (*pre_activate)(struct tb_tunnel *tunnel);
	int (*activate)(struct tb_tunnel *tunnel, bool activate);
	void (*post_deactivate)(struct tb_tunnel *tunnel);
	void (*destroy)(struct tb_tunnel *tunnel);
	int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
				 int *max_down);
	int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
				   int *allocated_down);
	int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
			       int *alloc_down);
	int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
				  int *consumed_down);
	int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
	void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel,
					    int *available_up,
					    int *available_down);
	struct list_head list;
	enum tb_tunnel_type type;
	enum tb_tunnel_state state;
	int max_up;
	int max_down;
	int allocated_up;
	int allocated_down;
	bool bw_mode;
	bool dprx_canceled;
	ktime_t dprx_timeout;
	struct delayed_work dprx_work;
	void (*callback)(struct tb_tunnel *tunnel, void *data);
	void *callback_data;
};

/* Per-type tunnel discovery and allocation */
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
					 bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
				      struct tb_port *down);
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
			    int *reserved_down);
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
					bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
				     struct tb_port *out, int link_nr,
				     int max_up, int max_down,
				     void (*callback)(struct tb_tunnel *, void *),
				     void *callback_data);
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
				      struct tb_port *dst, int transmit_path,
				      int transmit_ring, int receive_path,
				      int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
			 int transmit_ring, int receive_path, int receive_ring);
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
					  bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
				       struct tb_port *down, int max_up,
				       int max_down);

/* Generic tunnel lifetime management */
void tb_tunnel_put(struct tb_tunnel *tunnel);
int tb_tunnel_activate(struct tb_tunnel *tunnel);
void tb_tunnel_deactivate(struct tb_tunnel *tunnel);

/**
 * tb_tunnel_is_active() - Is tunnel fully activated
 * @tunnel: Tunnel to check
 *
 * Returns %true once @tunnel has reached %TB_TUNNEL_ACTIVE. Tunnels
 * other than DP get there pretty much when tb_tunnel_activate() returns
 * successfully; DP tunnels reach it only once the DPRX capabilities
 * read has been issued successfully.
 */
static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
	if (tunnel->state != TB_TUNNEL_ACTIVE)
		return false;
	return true;
}

bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
			    const struct tb_port *port);

/* Bandwidth management (dispatch to the tunnel's callbacks when set) */
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
				int *max_down);
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
				  int *allocated_down);
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
			      int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
				 int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
					   int *available_up,
					   int *available_down);

/**
 * tb_tunnel_is_pci() - Does the tunnel carry PCIe traffic
 * @tunnel: Tunnel to check
 */
static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel)
{
	if (tunnel->type == TB_TUNNEL_PCI)
		return true;
	return false;
}

/**
 * tb_tunnel_is_dp() - Does the tunnel carry DisplayPort traffic
 * @tunnel: Tunnel to check
 */
static inline bool tb_tunnel_is_dp(const struct tb_tunnel *tunnel)
{
	if (tunnel->type == TB_TUNNEL_DP)
		return true;
	return false;
}

/**
 * tb_tunnel_is_dma() - Is this a DMA tunnel
 * @tunnel: Tunnel to check
 */
static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel)
{
	if (tunnel->type == TB_TUNNEL_DMA)
		return true;
	return false;
}

/**
 * tb_tunnel_is_usb3() - Does the tunnel carry USB3 traffic
 * @tunnel: Tunnel to check
 */
static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
{
	if (tunnel->type == TB_TUNNEL_USB3)
		return true;
	return false;
}

/**
 * tb_tunnel_direction_downstream() - Is the tunnel directed downstream
 * @tunnel: Tunnel to check
 *
 * Decided by the path direction between the tunnel's source and
 * destination ports.
 */
static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
{
	struct tb_port *from = tunnel->src_port;
	struct tb_port *to = tunnel->dst_port;

	return tb_port_path_direction_downstream(from, to);
}

/* Human readable tunnel type name (used by the logging macros below) */
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);

/*
 * Log @fmt prefixed with the tunnel endpoints as
 * "<src route>:<port> <-> <dst route>:<port> (<type>)". @level is one of
 * the domain logging helpers (tb_WARN, tb_warn, tb_info, tb_dbg).
 * @tunnel is evaluated only once via the local __tunnel copy.
 */
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...)                   \
	do {                                                            \
		struct tb_tunnel *__tunnel = (tunnel);                  \
		level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt,   \
		      tb_route(__tunnel->src_port->sw),                 \
		      __tunnel->src_port->port,                         \
		      tb_route(__tunnel->dst_port->sw),                 \
		      __tunnel->dst_port->port,                         \
		      tb_tunnel_type_name(__tunnel),			\
		      ## arg);                                          \
	} while (0)

/* Per-level convenience wrappers around __TB_TUNNEL_PRINT() */
#define tb_tunnel_WARN(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
	__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)

#endif