// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@linux.intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel Henry <henry.mitchel@intel.com>
 */

#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl-afu.h"

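/*
 * Register offsets below are relative to the port error private feature
 * (PORT_FEATURE_ID_ERROR) region returned by dfl_get_feature_ioaddr_by_id().
 */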
#define PORT_ERROR_MASK		0x8
#define PORT_ERROR		0x10
#define PORT_FIRST_ERROR	0x18
#define PORT_MALFORMED_REQ0	0x20
#define PORT_MALFORMED_REQ1	0x28

#define ERROR_MASK		GENMASK_ULL(63, 0)

/* mask or unmask port errors via the error mask register. */
static void __afu_port_err_mask(struct device *dev, bool mask)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}

static void afu_port_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);

	mutex_lock(&pdata->lock);
	__afu_port_err_mask(dev, mask);
	mutex_unlock(&pdata->lock);
}

/* clear port errors. */
static int afu_port_err_clear(struct device *dev, u64 err)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *base_err, *base_hdr;
	int enable_ret = 0, ret = -EBUSY;
	u64 v;

	base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
	base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);

	/*
	 * clear Port Errors
	 *
	 * - Check for AP6 State
	 * - Halt Port by keeping Port in reset
	 * - Set PORT Error mask to all 1 to mask errors
	 * - Clear all errors
	 * - Set Port mask to all 0 to enable errors
	 * - All errors start capturing new errors
	 * - Enable Port by pulling the port out of reset
	 */

	/* Errors cannot be cleared while the device is in the AP6 power state. */
	v = readq(base_hdr + PORT_HDR_STS);
	if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
		goto done;
	}

	/* Halt Port by keeping Port in reset */
	ret = __afu_port_disable(pdev);
	if (ret)
		goto done;

	/* Mask all errors */
	__afu_port_err_mask(dev, true);

	/* Clear errors only if the err input matches the current port errors. */
	v = readq(base_err + PORT_ERROR);

	if (v == err) {
		writeq(v, base_err + PORT_ERROR);

		v = readq(base_err + PORT_FIRST_ERROR);
		writeq(v, base_err + PORT_FIRST_ERROR);
	} else {
		dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
			 __func__, v, err);
		ret = -EINVAL;
	}

	/* Clear mask */
	__afu_port_err_mask(dev, false);

	/* Enable the Port by clearing the reset */
	enable_ret = __afu_port_enable(pdev);

done:
	mutex_unlock(&pdata->lock);
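	/* A port re-enable failure takes precedence over the clear result. */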
	return enable_ret ? enable_ret : ret;
}

static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 error;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	mutex_lock(&pdata->lock);
	error = readq(base + PORT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}

static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
			    const char *buff, size_t count)
{
	u64 value;
	int ret;

	if (kstrtou64(buff, 0, &value))
		return -EINVAL;

	ret = afu_port_err_clear(dev, value);

	return ret ? ret : count;
}
static DEVICE_ATTR_RW(errors);
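
/*
 * Illustrative sysfs usage (the "dfl-port.0" instance name and the error
 * value are examples only):
 *
 *   # cat /sys/bus/platform/devices/dfl-port.0/errors/errors
 *   0x40
 *   # echo 0x40 > /sys/bus/platform/devices/dfl-port.0/errors/errors
 *
 * The write clears the latched errors only when the written value matches
 * what is currently reported; otherwise afu_port_err_clear() fails with
 * -EINVAL.
 */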

static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 error;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	mutex_lock(&pdata->lock);
	error = readq(base + PORT_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)error);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t first_malformed_req_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 req0, req1;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	mutex_lock(&pdata->lock);
	req0 = readq(base + PORT_MALFORMED_REQ0);
	req1 = readq(base + PORT_MALFORMED_REQ1);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%016llx%016llx\n",
		       (unsigned long long)req1, (unsigned long long)req0);
}
static DEVICE_ATTR_RO(first_malformed_req);

static struct attribute *port_err_attrs[] = {
	&dev_attr_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_first_malformed_req.attr,
	NULL,
};

static umode_t port_err_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
		return 0;

	return attr->mode;
}

const struct attribute_group port_err_group = {
	.name       = "errors",
	.attrs      = port_err_attrs,
	.is_visible = port_err_attrs_visible,
};

static int port_err_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	afu_port_err_mask(&pdev->dev, false);

	return 0;
}

static void port_err_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	afu_port_err_mask(&pdev->dev, true);
}

static long
port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_PORT_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
		return -ENODEV;
	}
}
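
/*
 * Illustrative userspace flow for the two error-interrupt ioctls above.
 * This is a sketch only: it assumes the UAPI definitions from
 * <linux/fpga-dfl.h> and an example port device node of /dev/dfl-port.0.
 *
 *   __u32 num_irqs;
 *   int fd = open("/dev/dfl-port.0", O_RDWR);
 *
 *   // query how many error interrupts the port feature exposes
 *   ioctl(fd, DFL_FPGA_PORT_ERR_GET_IRQ_NUM, &num_irqs);
 *
 *   // bind an eventfd to the first error interrupt
 *   struct dfl_fpga_irq_set *irqs = malloc(sizeof(*irqs) + sizeof(__s32));
 *   irqs->start = 0;
 *   irqs->count = 1;
 *   irqs->evtfds[0] = eventfd(0, 0);
 *   ioctl(fd, DFL_FPGA_PORT_ERR_SET_IRQ, irqs);
 *
 * The eventfd is then signalled when a port error interrupt fires.
 */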

const struct dfl_feature_id port_err_id_table[] = {
	{.id = PORT_FEATURE_ID_ERROR,},
	{0,}
};

const struct dfl_feature_ops port_err_ops = {
	.init = port_err_init,
	.uinit = port_err_uinit,
	.ioctl = port_err_ioctl,
};
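
/*
 * port_err_id_table and port_err_ops are picked up by the port feature
 * driver list in dfl-afu-main.c, which binds this sub-feature when a
 * matching private feature is enumerated.
 */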