// SPDX-License-Identifier: MIT
/*
 * Copyright © 2013-2021 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_iosf_mbi.h"
#include "i915_reg.h"
#include "vlv_iosf_sb.h"

/*
 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
 * VLV_VLV2_PUNIT_HAS_0.8.docx
 */

/* Standard MMIO read, non-posted */
#define SB_MRD_NP	0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP	0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP	0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP	0x07

/*
 * Empty IPI callback; used via on_each_cpu() to nudge every CPU out of
 * its current idle state before poking the punit (see __vlv_punit_get()).
 */
static void ping(void *info)
{
}

static void __vlv_punit_get(struct drm_i915_private *i915)
{
	iosf_mbi_punit_acquire();

	/*
	 * Prevent the cpu from sleeping while we use this sideband, otherwise
	 * the punit may cause a machine hang. The issue appears to be isolated
	 * to changing the power state of the CPU package while changing
	 * the power state via the punit, and we have only observed it
	 * reliably on 4-core Baytrail systems, suggesting the issue is in the
	 * power delivery mechanism and likely to be board/function
	 * specific. Hence we presume the workaround need only be applied
	 * to the Valleyview P-unit and not all sideband communications.
	 */
	if (IS_VALLEYVIEW(i915)) {
		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos, 0);
		on_each_cpu(ping, NULL, 1);
	}
}

static void __vlv_punit_put(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_update_request(&i915->vlv_iosf_sb.qos,
					       PM_QOS_DEFAULT_VALUE);

	iosf_mbi_punit_release();
}

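/**
 * vlv_iosf_sb_get - lock the IOSF sideband for a set of units
 * @drm: DRM device
 * @unit_mask: bitmask of BIT(VLV_IOSF_SB_*) units about to be accessed
 *
 * Grab the sideband mutex and, if the punit is included in @unit_mask,
 * also take the shared iosf_mbi punit lock and (on Valleyview) bump the
 * CPU latency QoS request so CPUs stay out of deep idle. Must be paired
 * with vlv_iosf_sb_put() using the same @unit_mask.
 */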
void vlv_iosf_sb_get(struct drm_device *drm, unsigned long unit_mask)
{
	struct drm_i915_private *i915 = to_i915(drm);

	if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_get(i915);

	mutex_lock(&i915->vlv_iosf_sb.lock);

	i915->vlv_iosf_sb.locked_unit_mask |= unit_mask;
}

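/**
 * vlv_iosf_sb_put - release the IOSF sideband for a set of units
 * @drm: DRM device
 * @unit_mask: bitmask of BIT(VLV_IOSF_SB_*) units, as passed to vlv_iosf_sb_get()
 *
 * Drop the locks and QoS request taken by vlv_iosf_sb_get().
 */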
void vlv_iosf_sb_put(struct drm_device *drm, unsigned long unit_mask)
{
	struct drm_i915_private *i915 = to_i915(drm);

	i915->vlv_iosf_sb.locked_unit_mask &= ~unit_mask;

	drm_WARN_ON(drm, i915->vlv_iosf_sb.locked_unit_mask);

	mutex_unlock(&i915->vlv_iosf_sb.lock);

	if (unit_mask & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_put(i915);
}

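/*
 * Typical access pattern (illustrative sketch only, not a caller from this
 * file; "reg", "bit" and "val" are placeholder names):
 *
 *	vlv_iosf_sb_get(drm, BIT(VLV_IOSF_SB_PUNIT));
 *
 *	val = vlv_iosf_sb_read(drm, VLV_IOSF_SB_PUNIT, reg);
 *	val |= bit;
 *	vlv_iosf_sb_write(drm, VLV_IOSF_SB_PUNIT, reg, val);
 *
 *	vlv_iosf_sb_put(drm, BIT(VLV_IOSF_SB_PUNIT));
 *
 * Several units can be held at once by OR-ing their BIT()s into the unit
 * mask passed to vlv_iosf_sb_get()/vlv_iosf_sb_put().
 */

/*
 * Low-level sideband transaction: program VLV_IOSF_ADDR and VLV_IOSF_DATA,
 * ring the doorbell with the devfn/opcode/port/byte-enable encoding, then
 * poll for IOSF_SB_BUSY to clear; for reads the result is fetched from
 * VLV_IOSF_DATA afterwards.
 */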
static int vlv_sideband_rw(struct drm_i915_private *i915,
			   u32 devfn, u32 port, u32 opcode,
			   u32 addr, u32 *val)
{
	struct intel_uncore *uncore = &i915->uncore;
	const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
	int err;

	lockdep_assert_held(&i915->vlv_iosf_sb.lock);
	if (port == IOSF_PORT_PUNIT)
		iosf_mbi_assert_punit_acquired();

	/* Flush the previous comms, just in case it failed last time. */
	if (intel_wait_for_register(uncore,
				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
				    5)) {
		drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
			is_read ? "read" : "write");
		return -EAGAIN;
	}

	preempt_disable();

	intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
	intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
	intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
			      (devfn << IOSF_DEVFN_SHIFT) |
			      (opcode << IOSF_OPCODE_SHIFT) |
			      (port << IOSF_PORT_SHIFT) |
			      (0xf << IOSF_BYTE_ENABLES_SHIFT) |
			      (0 << IOSF_BAR_SHIFT) |
			      IOSF_SB_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
					 10000, 0, NULL) == 0) {
		if (is_read)
			*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
		err = 0;
	} else {
		drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
			is_read ? "read" : "write");
		err = -ETIMEDOUT;
	}

	preempt_enable();

	return err;
}

static u32 unit_to_devfn(enum vlv_iosf_sb_unit unit)
{
	if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2 ||
	    unit == VLV_IOSF_SB_FLISDSI)
		return DPIO_DEVFN;
	else
		return PCI_DEVFN(0, 0);
}

static u32 unit_to_port(enum vlv_iosf_sb_unit unit)
{
	switch (unit) {
	case VLV_IOSF_SB_BUNIT:
		return IOSF_PORT_BUNIT;
	case VLV_IOSF_SB_CCK:
		return IOSF_PORT_CCK;
	case VLV_IOSF_SB_CCU:
		return IOSF_PORT_CCU;
	case VLV_IOSF_SB_DPIO:
		return IOSF_PORT_DPIO;
	case VLV_IOSF_SB_DPIO_2:
		return IOSF_PORT_DPIO_2;
	case VLV_IOSF_SB_FLISDSI:
		return IOSF_PORT_FLISDSI;
	case VLV_IOSF_SB_GPIO:
		return 0; /* FIXME: unused */
	case VLV_IOSF_SB_NC:
		return IOSF_PORT_NC;
	case VLV_IOSF_SB_PUNIT:
		return IOSF_PORT_PUNIT;
	default:
		return 0;
	}
}

static u32 unit_to_opcode(enum vlv_iosf_sb_unit unit, bool write)
{
	if (unit == VLV_IOSF_SB_DPIO || unit == VLV_IOSF_SB_DPIO_2)
		return write ? SB_MWR_NP : SB_MRD_NP;
	else
		return write ? SB_CRWRDA_NP : SB_CRRDDA_NP;
}

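/**
 * vlv_iosf_sb_read - read a register through the IOSF sideband
 * @drm: DRM device
 * @unit: VLV_IOSF_SB_* unit to access
 * @addr: register address within the unit
 *
 * The caller must already hold the sideband for @unit via vlv_iosf_sb_get().
 *
 * Returns: the register value, or 0 on failure.
 */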
u32 vlv_iosf_sb_read(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr)
{
	struct drm_i915_private *i915 = to_i915(drm);
	u32 devfn, port, opcode, val = 0;

	devfn = unit_to_devfn(unit);
	port = unit_to_port(unit);
	opcode = unit_to_opcode(unit, false);

	if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
		return 0;

	drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));

	vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);

	return val;
}

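/**
 * vlv_iosf_sb_write - write a register through the IOSF sideband
 * @drm: DRM device
 * @unit: VLV_IOSF_SB_* unit to access
 * @addr: register address within the unit
 * @val: value to write
 *
 * The caller must already hold the sideband for @unit via vlv_iosf_sb_get().
 *
 * Returns: 0 on success, a negative error code on failure.
 */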
int vlv_iosf_sb_write(struct drm_device *drm, enum vlv_iosf_sb_unit unit, u32 addr, u32 val)
{
	struct drm_i915_private *i915 = to_i915(drm);
	u32 devfn, port, opcode;

	devfn = unit_to_devfn(unit);
	port = unit_to_port(unit);
	opcode = unit_to_opcode(unit, true);

	if (drm_WARN_ONCE(&i915->drm, !port, "invalid unit %d\n", unit))
		return -EINVAL;

	drm_WARN_ON(&i915->drm, !(i915->vlv_iosf_sb.locked_unit_mask & BIT(unit)));

	return vlv_sideband_rw(i915, devfn, port, opcode, addr, &val);
}

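/**
 * vlv_iosf_sb_init - set up IOSF sideband state
 * @i915: i915 device instance
 *
 * Initialize the sideband mutex on VLV/CHV and, on Valleyview only, register
 * the CPU latency QoS request used by the punit workaround. A no-op on other
 * platforms.
 */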
void vlv_iosf_sb_init(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		mutex_init(&i915->vlv_iosf_sb.lock);

	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_add_request(&i915->vlv_iosf_sb.qos, PM_QOS_DEFAULT_VALUE);
}

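/**
 * vlv_iosf_sb_fini - tear down IOSF sideband state
 * @i915: i915 device instance
 *
 * Counterpart of vlv_iosf_sb_init().
 */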
void vlv_iosf_sb_fini(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		cpu_latency_qos_remove_request(&i915->vlv_iosf_sb.qos);

	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		mutex_destroy(&i915->vlv_iosf_sb.lock);
}